| column | type | values |
|---|---|---|
| code | string | length 82 – 53.2k characters |
| code_codestyle | int64 | 0 – 721 |
| style_context | string | length 91 – 41.9k characters |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
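Each record below pairs a `code` snippet (tagged with the integer style id `code_codestyle`) with a `style_context` snippet (tagged with `style_context_codestyle`) and a binary `label`; in the rows shown on this page, `label` is 1 exactly when the two style ids match. As a minimal sketch of how such a table could be loaded and scanned, assuming it is published as a Hugging Face dataset, the snippet below uses the `datasets` library; the repository id `user/code-style-pairs` and the `train` split name are placeholders, not names taken from this page.

```python
from datasets import load_dataset

# Hypothetical repository id -- substitute the actual dataset name.
ds = load_dataset("user/code-style-pairs", split="train")

# Columns: code (str), code_codestyle (int), style_context (str),
#          style_context_codestyle (int), label (int).
for row in ds.select(range(3)):
    same_style = row["code_codestyle"] == row["style_context_codestyle"]
    print(
        f"code_codestyle={row['code_codestyle']:>3}  "
        f"style_context_codestyle={row['style_context_codestyle']:>3}  "
        f"label={row['label']}  same_style={same_style}"
    )
```

code: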
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22PriorEmb2EmbPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : List[str] = KandinskyV22ControlnetImg2ImgPipeline
_UpperCamelCase : Any = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCamelCase : Optional[int] = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCamelCase : Any = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Optional[Any] = False
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
return 1_00
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[Any] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_snake_case : List[str] = UNet2DConditionModel(**lowerCamelCase_ )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : int = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Any = self.dummy_unet
_snake_case : Optional[Any] = self.dummy_movq
_snake_case : int = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_snake_case : int = DDIMScheduler(**lowerCamelCase_ )
_snake_case : Tuple = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any]=0 ):
'''simple docstring'''
_snake_case : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
_snake_case : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
_snake_case : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
_snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_snake_case : int = Image.fromarray(np.uint8(lowerCamelCase_ ) ).convert('RGB' ).resize((2_56, 2_56) )
# create hint
_snake_case : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
else:
_snake_case : Dict = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_snake_case : Tuple = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = 'cpu'
_snake_case : Any = self.get_dummy_components()
_snake_case : List[Any] = self.pipeline_class(**lowerCamelCase_ )
_snake_case : str = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
_snake_case : List[Any] = output.images
_snake_case : Any = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : List[str] = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
_snake_case : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_snake_case : Dict = init_image.resize((5_12, 5_12) )
_snake_case : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
_snake_case : List[str] = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
_snake_case : str = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_snake_case : Union[str, Any] = 'A robot, 4k photo'
_snake_case : Union[str, Any] = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(lowerCamelCase_ )
_snake_case : Optional[Any] = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.float16 )
_snake_case : Tuple = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_snake_case , _snake_case : Any = pipe_prior(
lowerCamelCase_ , image=lowerCamelCase_ , strength=0.85 , generator=lowerCamelCase_ , negative_prompt='' , ).to_tuple()
_snake_case : str = pipeline(
image=lowerCamelCase_ , image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , hint=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type='np' , )
_snake_case : Union[str, Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
code_codestyle: 304
style_context:
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def A__( __lowerCAmelCase ):
_snake_case : Dict = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__( __lowerCAmelCase ):
_snake_case , _snake_case : Any = emb.weight.shape
_snake_case : List[str] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
_snake_case : Optional[int] = emb.weight.data
return lin_layer
def A__( __lowerCAmelCase ):
_snake_case : List[str] = torch.load(__lowerCAmelCase , map_location='cpu' )
_snake_case : List[Any] = Namespace(**checkpoint['cfg']['model'] )
_snake_case : Optional[Any] = checkpoint['model']
remove_ignore_keys_(__lowerCAmelCase )
_snake_case : List[Any] = state_dict['decoder.embed_tokens.weight'].shape[0]
_snake_case : Dict = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
_snake_case : Optional[Any] = XGLMConfig(
vocab_size=__lowerCAmelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
_snake_case : Optional[Any] = XGLMForCausalLM(__lowerCAmelCase )
_snake_case : Any = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
print(__lowerCAmelCase )
_snake_case : Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase_ : Optional[Any] = parser.parse_args()
lowercase_ : List[Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
style_context_codestyle: 304
label: 1

code:
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
code_codestyle: 705
style_context:
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , **lowerCamelCase__ ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCamelCase__ , )
super().__init__(args=lowerCamelCase__ , **lowerCamelCase__ )
style_context_codestyle: 623
label: 0

code:
from PIL import Image
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ):
def brightness(lowerCAmelCase__ ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(lowerCAmelCase__ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__SCREAMING_SNAKE_CASE : int =change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
code_codestyle: 428
style_context:
import os
from distutils.util import strtobool
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ):
for e in env_keys:
lowercase = int(os.environ.get(lowerCAmelCase__ ,-1 ) )
if val >= 0:
return val
return default
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__=False ):
lowercase = os.environ.get(lowerCAmelCase__ ,str(lowerCAmelCase__ ) )
return strtobool(lowerCAmelCase__ ) == 1 # As its name indicates `strtobool` actually returns an int...
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__="no" ):
lowercase = os.environ.get(lowerCAmelCase__ ,str(lowerCAmelCase__ ) )
return value
style_context_codestyle: 428
label: 1

code:
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 717
style_context:
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
style_context_codestyle: 16
label: 0

code:
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
SCREAMING_SNAKE_CASE = 'src/transformers'
SCREAMING_SNAKE_CASE = 'docs/source/en/tasks'
def lowercase_ ( __A : Union[str, Any] , __A : str , __A : Any ) -> Tuple:
"""simple docstring"""
with open(__A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase : Optional[int] =f.readlines()
# Find the start prompt.
lowercase : List[str] =0
while not lines[start_index].startswith(__A ):
start_index += 1
start_index += 1
lowercase : List[Any] =start_index
while not lines[end_index].startswith(__A ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH)
SCREAMING_SNAKE_CASE = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SCREAMING_SNAKE_CASE = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def lowercase_ ( __A : Optional[int] ) -> str:
"""simple docstring"""
lowercase : Dict =TASK_GUIDE_TO_MODELS[task_guide]
lowercase : Tuple =SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__A , set() )
lowercase : Optional[Any] ={
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def lowercase_ ( __A : Optional[int] , __A : Union[str, Any]=False ) -> str:
"""simple docstring"""
lowercase , lowercase , lowercase , lowercase : List[str] =_find_text_in_file(
filename=os.path.join(__A , __A ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
lowercase : Dict =get_model_list_for_task(__A )
if current_list != new_list:
if overwrite:
with open(os.path.join(__A , __A ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
''' to fix this.''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
SCREAMING_SNAKE_CASE = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
code_codestyle: 94
style_context:
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = "openai/whisper-base"
UpperCAmelCase__ = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
UpperCAmelCase__ = "transcriber"
UpperCAmelCase__ = WhisperProcessor
UpperCAmelCase__ = WhisperForConditionalGeneration
UpperCAmelCase__ = ["audio"]
UpperCAmelCase__ = ["text"]
def lowerCamelCase__ ( self : Dict , __snake_case : List[str] ) -> Dict:
return self.pre_processor(__snake_case , return_tensors="""pt""" ).input_features
def lowerCamelCase__ ( self : int , __snake_case : Union[str, Any] ) -> List[str]:
return self.model.generate(inputs=__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : List[Any] ) -> Any:
return self.pre_processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )[0]
style_context_codestyle: 96
label: 0

code:
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCamelCase = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[Any] , lowerCamelCase__ :Path , lowerCamelCase__ :Union[str, None] = None , lowerCamelCase__ :Union[List[str], None] = None , lowerCamelCase__ :Union[str, List[str], None] = None , lowerCamelCase__ :bool = True , ):
UpperCamelCase__ :str = [file for file in os.listdir(lowerCamelCase__ ) if os.path.isfile(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) )]
if identifier is not None:
UpperCamelCase__ :Union[str, Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
for n_ in n_identifier:
UpperCamelCase__ :int = [file for file in files if n_ not in file]
else:
UpperCamelCase__ :Dict = [file for file in files if n_identifier not in file]
UpperCamelCase__ :Any = ignore_files or []
ignore_files.append("""__init__.py""" )
UpperCamelCase__ :str = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , lowerCamelCase__ )
if only_modules:
UpperCamelCase__ :str = file.split(""".""" )[0]
try:
UpperCamelCase__ :str = getattr(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = doctest.DocTestSuite(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = unittest.TextTestRunner().run(lowerCamelCase__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
UpperCamelCase__ :Any = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __a ( self :Any ):
UpperCamelCase__ :List[str] = Path("""src/transformers""" )
UpperCamelCase__ :List[Any] = """modeling"""
UpperCamelCase__ :Optional[Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(lowerCamelCase__ , identifier=lowerCamelCase__ , ignore_files=lowerCamelCase__ )
def __a ( self :int ):
UpperCamelCase__ :int = Path("""src/transformers""" )
UpperCamelCase__ :Optional[Any] = """tokenization"""
self.analyze_directory(lowerCamelCase__ , identifier=lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Dict = Path("""src/transformers""" )
UpperCamelCase__ :Optional[Any] = """configuration"""
self.analyze_directory(lowerCamelCase__ , identifier=lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[Any] = Path("""src/transformers""" )
UpperCamelCase__ :Dict = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(lowerCamelCase__ , n_identifier=lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = Path("""docs/source""" )
UpperCamelCase__ :Optional[int] = ["""favicon.ico"""]
self.analyze_directory(lowerCamelCase__ , ignore_files=lowerCamelCase__ , only_modules=lowerCamelCase__ )
code_codestyle: 706
style_context:
def A ( lowercase__ : List[str] , lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : List[str] , lowercase__ : Any , lowercase__ : Union[str, Any] ) -> Tuple:
if index == r:
for j in range(lowercase__ ):
print(data[j] , end=""" """ )
print(""" """ )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
UpperCamelCase__ :Union[str, Any] = arr[i]
combination_util(lowercase__ , lowercase__ , lowercase__ , index + 1 , lowercase__ , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def A ( lowercase__ : Dict , lowercase__ : str , lowercase__ : Any ) -> Tuple:
# A temporary array to store all combination one by one
UpperCamelCase__ :int = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowercase__ , lowercase__ , lowercase__ , 0 , lowercase__ , 0 )
if __name__ == "__main__":
# Driver code to check the function above
UpperCamelCase = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
style_context_codestyle: 383
label: 0

code:
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : int = "▁"
SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE : Optional[int] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
SCREAMING_SNAKE_CASE : List[Any] = {
"google/pegasus-xsum": 512,
}
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class _lowerCamelCase( _a ):
lowercase_ : Tuple = VOCAB_FILES_NAMES
lowercase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self, lowerCamelCase, lowerCamelCase="<pad>", lowerCamelCase="</s>", lowerCamelCase="<unk>", lowerCamelCase="<mask_2>", lowerCamelCase="<mask_1>", lowerCamelCase=None, lowerCamelCase=1_03, lowerCamelCase = None, **lowerCamelCase, ) -> None:
"""simple docstring"""
_lowercase : str = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase, lowerCamelCase):
raise TypeError(
F'''additional_special_tokens should be of type {type(lowerCamelCase)}, but is'''
F''' {type(lowerCamelCase)}''')
_lowercase : Any = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(lowerCamelCase), self.offset - 1)
]
if len(set(lowerCamelCase)) != len(lowerCamelCase):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''')
_lowercase : Optional[int] = additional_special_tokens_extended
else:
_lowercase : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset)]
_lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase, unk_token=lowerCamelCase, mask_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token_sent=lowerCamelCase, offset=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
_lowercase : str = mask_token_sent
_lowercase : Optional[int] = vocab_file
_lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase)
# add special tokens to encoder dict
_lowercase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
})
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
_lowercase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return len(self.sp_model) + self.offset
def UpperCamelCase ( self) -> Dict[str, int]:
"""simple docstring"""
_lowercase : Union[str, Any] = {self.convert_ids_to_tokens(lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
"""simple docstring"""
_lowercase : Optional[Any] = self.__dict__.copy()
_lowercase : Union[str, Any] = None
return state
def __setstate__( self, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Dict = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
_lowercase : Any = {}
_lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
_lowercase : Any = self.sp_model.piece_to_id(lowerCamelCase)
return sp_id + self.offset
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
_lowercase : Tuple = self.sp_model.IdToPiece(index - self.offset)
return token
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : Union[str, Any] = []
_lowercase : Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase) + token
_lowercase : List[str] = []
else:
current_sub_tokens.append(lowerCamelCase)
out_string += self.sp_model.decode(lowerCamelCase)
return out_string.strip()
def UpperCamelCase ( self, lowerCamelCase=False) -> Optional[Any]:
"""simple docstring"""
return 1
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : str = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase)
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : Tuple = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase, 'wb') as fi:
_lowercase : Tuple = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase)
return (out_vocab_file,)
code_codestyle: 89
style_context:
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
# Initialise PyTorch model
_lowercase : Optional[int] = T5Config.from_json_file(lowerCamelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowercase : Union[str, Any] = T5ForConditionalGeneration(lowerCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_t5(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
style_context_codestyle: 89
label: 1

code:
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 277
style_context:
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
A = logging.get_logger(__name__)
class lowercase__ :
def __init__( self : List[str] , _lowercase : str = None , _lowercase : uuid.UUID = None , _lowercase : Dict=None , _lowercase : Any=None ):
"""simple docstring"""
if not conversation_id:
UpperCAmelCase__ = uuid.uuid4()
if past_user_inputs is None:
UpperCAmelCase__ = []
if generated_responses is None:
UpperCAmelCase__ = []
UpperCAmelCase__ = conversation_id
UpperCAmelCase__ = past_user_inputs
UpperCAmelCase__ = generated_responses
UpperCAmelCase__ = text
def __eq__( self : Any , _lowercase : str ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _UpperCAmelCase ( self : Any , _lowercase : str , _lowercase : bool = False ):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
UpperCAmelCase__ = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
UpperCAmelCase__ = text
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
UpperCAmelCase__ = None
def _UpperCAmelCase ( self : List[str] , _lowercase : str ):
"""simple docstring"""
self.generated_responses.append(_lowercase )
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
UpperCAmelCase__ = "user" if is_user else "bot"
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
__SCREAMING_SNAKE_CASE , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self : str , *_lowercase : Union[str, Any] , **_lowercase : Any ):
"""simple docstring"""
super().__init__(*_lowercase , **_lowercase )
if self.tokenizer.pad_token_id is None:
UpperCAmelCase__ = self.tokenizer.eos_token
def _UpperCAmelCase ( self : List[str] , _lowercase : int=None , _lowercase : Any=None , _lowercase : List[str]=None , **_lowercase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
if min_length_for_response is not None:
UpperCAmelCase__ = min_length_for_response
if minimum_tokens is not None:
UpperCAmelCase__ = minimum_tokens
if "max_length" in generate_kwargs:
UpperCAmelCase__ = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCAmelCase__ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_lowercase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Dict , _lowercase : Union[Conversation, List[Conversation]] , _lowercase : List[str]=0 , **_lowercase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = super().__call__(_lowercase , num_workers=_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
def _UpperCAmelCase ( self : Optional[Any] , _lowercase : Conversation , _lowercase : List[Any]=32 ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
UpperCAmelCase__ = self.tokenizer._build_conversation_input_ids(_lowercase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCAmelCase__ = self._legacy_parse_and_tokenize(_lowercase )
if self.framework == "pt":
UpperCAmelCase__ = torch.LongTensor([input_ids] )
elif self.framework == "tf":
UpperCAmelCase__ = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _UpperCAmelCase ( self : Optional[Any] , _lowercase : List[Any] , _lowercase : List[Any]=10 , **_lowercase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length )
UpperCAmelCase__ = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
UpperCAmelCase__ = max_length - minimum_tokens
UpperCAmelCase__ = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
UpperCAmelCase__ = model_inputs["attention_mask"][:, -trim:]
UpperCAmelCase__ = model_inputs.pop("conversation" )
UpperCAmelCase__ = max_length
UpperCAmelCase__ = self.model.generate(**_lowercase , **_lowercase )
if self.model.config.is_encoder_decoder:
UpperCAmelCase__ = 1
else:
UpperCAmelCase__ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _UpperCAmelCase ( self : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]=True ):
"""simple docstring"""
UpperCAmelCase__ = model_outputs["output_ids"]
UpperCAmelCase__ = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
UpperCAmelCase__ = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(_lowercase )
return conversation
def _UpperCAmelCase ( self : List[str] , _lowercase : Conversation ):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer.eos_token_id
UpperCAmelCase__ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
if len(_lowercase ) > self.tokenizer.model_max_length:
UpperCAmelCase__ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
style_context_codestyle: 277
label: 1

code:
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( snake_case__ , unittest.TestCase ):
__UpperCamelCase = ProphetNetTokenizer
__UpperCamelCase = False
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
__snake_case = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__snake_case = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : Any ):
"""simple docstring"""
__snake_case = "UNwant\u00E9d,running"
__snake_case = "unwanted, running"
return input_text, output_text
def UpperCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.tokenizer_class(self.vocab_file )
__snake_case = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_lowerCAmelCase ,["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[9, 6, 7, 12, 10, 11] )
def UpperCamelCase_ ( self : Any ):
"""simple docstring"""
__snake_case = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) ,["ah", "\u535A", "\u63A8", "zz"] )
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) ,["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) ,["hello"] )
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) ,["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) ,["h\u00E9llo"] )
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) ,["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) ,["hello"] )
def UpperCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) ,["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) ,["hello"] )
def UpperCamelCase_ ( self : str ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) ,["HeLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) ,["HäLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) ,["HaLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCamelCase_ ( self : int ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=_lowerCAmelCase ,never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) ,["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
__snake_case = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__snake_case = {}
for i, token in enumerate(_lowerCAmelCase ):
__snake_case = i
__snake_case = WordpieceTokenizer(vocab=_lowerCAmelCase ,unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) ,[] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) ,["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) ,["[UNK]", "runn", "##ing"] )
@require_torch
def UpperCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
__snake_case = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__snake_case = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
__snake_case = tokenizer(_lowerCAmelCase ,padding=_lowerCAmelCase ,return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase ,_lowerCAmelCase )
__snake_case = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def UpperCamelCase_ ( self : Any ):
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 524
|
from .imports import is_rich_available
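# Install rich's pretty traceback handler globally when the optional dependency is available; otherwise fail loudly so the user knows what to install.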
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 524
| 1
|
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the whole diamond, or a notice when n is not positive."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("""Good Bye...""")
| 147
|
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
A = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """BlenderbotSmall does not use token type ids, so an all-zero mask is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 147
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is kept in the asdict output so the template serializes to JSON with its name
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 507
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Cap the Parquet row-group size when the dataset contains image, audio or binary features, so individual row groups stay cheap to load."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        # hand the data files and the packaged-module hash over to the Parquet builder
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)
    def read(self):
        # Build streaming (iterable) dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc
            )
            dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to an open binary file handle as Parquet, one write per batch. Returns the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format"
        ):
            batch = query_table(table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None)
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 221
| 0
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_UpperCamelCase = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
_UpperCamelCase = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = get_test_to_tester_mapping(A )
UpperCAmelCase = get_test_to_tester_mapping(A )
UpperCAmelCase = {"""BertModelTest""": """BertModelTester"""}
UpperCAmelCase = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(A ) ,A )
self.assertEqual(get_test_info.to_json(A ) ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase = get_model_to_test_mapping(A )
UpperCAmelCase = get_model_to_test_mapping(A )
UpperCAmelCase = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
UpperCAmelCase = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(A ) ,A )
self.assertEqual(get_test_info.to_json(A ) ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase = get_model_to_tester_mapping(A )
UpperCAmelCase = get_model_to_tester_mapping(A )
UpperCAmelCase = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
UpperCAmelCase = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(A ) ,A )
self.assertEqual(get_test_info.to_json(A ) ,A )
| 74
|
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case ):
"""simple docstring"""
return len(set(_snake_case ) ) == len(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74
| 1
|
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    """Encrypt or decrypt `message` with the Vigenère cipher using `key`; non-letters are passed through unchanged."""
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 414
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from a single workflow job."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Fetch timing info for every job of a GitHub Actions workflow run (paginated, 100 jobs per page)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"{k}: {v['duration']}")
| 414
| 1
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub API and raise if any of the target self-hosted runners is offline."""
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("utf-8"))
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        """Parse a comma-separated CLI value into a list of strings."""
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 712
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
_lowerCamelCase : str = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def __magic_name__ ( self ):
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , _SCREAMING_SNAKE_CASE , )
@cached_property
def __magic_name__ ( self ):
logger.info("""PyTorch: setting up devices""" )
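        # Device selection order: explicit CPU (no_cuda), SageMaker model parallel, SageMaker data parallel, single-process CUDA/CPU, then torch.distributed with the NCCL backend.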
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
a_ = torch.device("""cpu""" )
a_ = 0
elif is_sagemaker_model_parallel_available():
a_ = smp.local_rank()
a_ = torch.device("""cuda""" , _SCREAMING_SNAKE_CASE )
a_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
a_ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
a_ = torch.device("""cuda""" , self.local_rank )
a_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
a_ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
a_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
a_ = torch.device("""cuda""" , self.local_rank )
a_ = 1
if device.type == "cuda":
torch.cuda.set_device(_SCREAMING_SNAKE_CASE )
return device
@property
def __magic_name__ ( self ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def __magic_name__ ( self ):
return not is_sagemaker_model_parallel_available()
@property
def __magic_name__ ( self ):
return False
| 403
| 0
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( a : Optional[Any] ) -> Any:
"""simple docstring"""
return EnvironmentCommand()
def lowerCamelCase__ ( a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class lowerCAmelCase_ ( lowerCamelCase__):
@staticmethod
def _snake_case ( __A : ArgumentParser ) ->Union[str, Any]:
"""simple docstring"""
a__ :Optional[int] = parser.add_parser("env" )
download_parser.set_defaults(func=__A )
download_parser.add_argument(
"--accelerate-config_file" , default=__A , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=__A )
def __init__( self : Dict , __A : Tuple , *__A : Optional[int] ) ->str:
"""simple docstring"""
a__ :List[Any] = accelerate_config_file
def _snake_case ( self : str ) ->Union[str, Any]:
"""simple docstring"""
a__ :List[str] = "not installed"
if is_safetensors_available():
import safetensors
a__ :Union[str, Any] = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
a__ :Union[str, Any] = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
a__ :Union[str, Any] = "not installed"
a__ :Optional[Any] = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
a__ :str = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(__A ):
a__ :List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict()
a__ :Optional[Any] = (
"\n".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(__A , __A )
else F'''\t{accelerate_config}'''
)
a__ :int = "not installed"
a__ :Union[str, Any] = "NA"
if is_torch_available():
import torch
a__ :List[str] = torch.__version__
a__ :Tuple = torch.cuda.is_available()
a__ :List[str] = "not installed"
a__ :Any = "NA"
if is_tf_available():
import tensorflow as tf
a__ :Optional[Any] = tf.__version__
try:
# deprecated in v2.1
a__ :Optional[int] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
a__ :Union[str, Any] = bool(tf.config.list_physical_devices("GPU" ) )
a__ :Union[str, Any] = "not installed"
a__ :Any = "not installed"
a__ :Union[str, Any] = "not installed"
a__ :Tuple = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
a__ :Any = flax.__version__
a__ :Union[str, Any] = jax.__version__
a__ :int = jaxlib.__version__
a__ :Optional[int] = jax.lib.xla_bridge.get_backend().platform
a__ :Tuple = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": F'''{safetensors_version}''',
"Accelerate version": F'''{accelerate_version}''',
"Accelerate config": F'''{accelerate_config_str}''',
"PyTorch version (GPU?)": F'''{pt_version} ({pt_cuda_available})''',
"Tensorflow version (GPU?)": F'''{tf_version} ({tf_cuda_available})''',
"Flax version (CPU?/GPU?/TPU?)": F'''{flax_version} ({jax_backend})''',
"Jax version": F'''{jax_version}''',
"JaxLib version": F'''{jaxlib_version}''',
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(__A ) )
return info
@staticmethod
def _snake_case ( __A : Union[str, Any] ) ->Any:
"""simple docstring"""
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 395
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
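# The next function performs Gaussian elimination with partial pivoting followed by back substitution on the augmented system, returning the solution column rounded to 10 decimal places.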
def A__ ( A_ , A_ ) -> Matrix:
_lowercase = len(A_ )
_lowercase = [[0 for _ in range(size + 1 )] for _ in range(A_ )]
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
for row in range(A_ ):
for col in range(A_ ):
_lowercase = matrix[row][col]
_lowercase = vector[row][0]
_lowercase = 0
_lowercase = 0
while row < size and col < size:
# pivoting
_lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(A_ , A_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowercase , _lowercase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , A_ ):
_lowercase = augmented[rowa][col] / augmented[row][col]
_lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , A_ ):
for row in range(A_ ):
_lowercase = augmented[row][col] / augmented[col][col]
for cola in range(A_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(A_ )
]
def A__ ( A_ ) -> Callable[[int], int]:
_lowercase = len(A_ )
_lowercase = [[0 for _ in range(A_ )] for _ in range(A_ )]
_lowercase = [[0] for _ in range(A_ )]
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
for x_val, y_val in enumerate(A_ ):
for col in range(A_ ):
_lowercase = (x_val + 1) ** (size - col - 1)
_lowercase = y_val
_lowercase = solve(A_ , A_ )
def interpolated_func(A_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(A_ ) )
return interpolated_func
def A__ ( A_ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def A__ ( A_ = question_function , A_ = 10 ) -> int:
_lowercase = [func(A_ ) for x_val in range(1 , order + 1 )]
_lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowercase = 0
_lowercase = 42
_lowercase = 42
for poly in polynomials:
_lowercase = 1
while func(A_ ) == poly(A_ ):
x_val += 1
ret += poly(A_ )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 497
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config with the matching ResNet backbone
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    # list all keys to be renamed (original torchhub name on the left, Hugging Face name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
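    # transformer encoder/decoder layers: map the torchhub parameter names onto the Hugging Face DETR naming scheme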
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def UpperCamelCase_ ( snake_case_ : Optional[int] , snake_case_ : Optional[Any]=False ) -> Dict:
'''simple docstring'''
__lowerCAmelCase = """"""
if is_panoptic:
__lowerCAmelCase = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[:2_56, :]
__lowerCAmelCase = in_proj_bias[:2_56]
__lowerCAmelCase = in_proj_weight[2_56:5_12, :]
__lowerCAmelCase = in_proj_bias[2_56:5_12]
__lowerCAmelCase = in_proj_weight[-2_56:, :]
__lowerCAmelCase = in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__lowerCAmelCase = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[:2_56, :]
__lowerCAmelCase = in_proj_bias[:2_56]
__lowerCAmelCase = in_proj_weight[2_56:5_12, :]
__lowerCAmelCase = in_proj_bias[2_56:5_12]
__lowerCAmelCase = in_proj_weight[-2_56:, :]
__lowerCAmelCase = in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
__lowerCAmelCase = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__lowerCAmelCase = in_proj_weight_cross_attn[:2_56, :]
__lowerCAmelCase = in_proj_bias_cross_attn[:2_56]
__lowerCAmelCase = in_proj_weight_cross_attn[2_56:5_12, :]
__lowerCAmelCase = in_proj_bias_cross_attn[2_56:5_12]
__lowerCAmelCase = in_proj_weight_cross_attn[-2_56:, :]
__lowerCAmelCase = in_proj_bias_cross_attn[-2_56:]
def prepare_img():
    # standard COCO test image (two cats on a couch) used to verify the conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = get_detr_config(snake_case_ )
# load original model from torch hub
__lowerCAmelCase = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(f"""Converting model {model_name}...""" )
__lowerCAmelCase = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=snake_case_ ).eval()
__lowerCAmelCase = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(snake_case_ ):
if is_panoptic:
__lowerCAmelCase = """detr.""" + src
rename_key(snake_case_ , snake_case_ , snake_case_ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCAmelCase = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
__lowerCAmelCase = state_dict.pop(snake_case_ )
__lowerCAmelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCAmelCase = state_dict.pop(snake_case_ )
__lowerCAmelCase = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
__lowerCAmelCase = state_dict.pop(snake_case_ )
__lowerCAmelCase = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
__lowerCAmelCase = state_dict.pop(snake_case_ )
__lowerCAmelCase = val
# finally, create HuggingFace model and load state dict
__lowerCAmelCase = DetrForSegmentation(snake_case_ ) if is_panoptic else DetrForObjectDetection(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
# verify our conversion on an image
__lowerCAmelCase = """coco_panoptic""" if is_panoptic else """coco_detection"""
__lowerCAmelCase = DetrImageProcessor(format=snake_case_ )
__lowerCAmelCase = processor(images=prepare_img() , return_tensors="""pt""" )
__lowerCAmelCase = encoding["""pixel_values"""]
__lowerCAmelCase = detr(snake_case_ )
__lowerCAmelCase = model(snake_case_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(f"""nielsr/{model_name}""" )
processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 330
|
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A : str = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : str , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple=1_25 , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowerCAmelCase = [f"""<extra_id_{i}>""" for i in range(SCREAMING_SNAKE_CASE__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__lowerCAmelCase = len(set(filter(lambda SCREAMING_SNAKE_CASE__ : bool("""extra_id""" in str(SCREAMING_SNAKE_CASE__ ) ) , SCREAMING_SNAKE_CASE__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
__lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
__lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
__lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
super().__init__(
eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , extra_ids=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = extra_ids
__lowerCAmelCase = 2**8 # utf is 8 bits
# define special tokens dict
__lowerCAmelCase = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__lowerCAmelCase = len(self.special_tokens_encoder )
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ )
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase = self.vocab_size + i - n
__lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def a ( self : str ) -> Tuple:
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def a ( self : Any , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def a ( self : str , SCREAMING_SNAKE_CASE__ : List[int] ) -> List[int]:
if len(SCREAMING_SNAKE_CASE__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase = self._add_eos_if_not_present(SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return token_ids_a
else:
__lowerCAmelCase = self._add_eos_if_not_present(SCREAMING_SNAKE_CASE__ )
return token_ids_a + token_ids_a
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
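        # ByT5 operates on raw UTF-8 bytes: every byte of the text becomes its own single-character token.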
__lowerCAmelCase = [chr(SCREAMING_SNAKE_CASE__ ) for i in text.encode("""utf-8""" )]
return tokens
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> Tuple:
if token in self.special_tokens_encoder:
__lowerCAmelCase = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__lowerCAmelCase = self.added_tokens_encoder[token]
elif len(SCREAMING_SNAKE_CASE__ ) != 1:
__lowerCAmelCase = self.unk_token_id
else:
__lowerCAmelCase = ord(SCREAMING_SNAKE_CASE__ ) + self._num_special_tokens
return token_id
def a ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
if index in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[index]
else:
__lowerCAmelCase = chr(index - self._num_special_tokens )
return token
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
__lowerCAmelCase = B""""""
for token in tokens:
if token in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
__lowerCAmelCase = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
__lowerCAmelCase = token.encode("""utf-8""" )
else:
__lowerCAmelCase = bytes([ord(SCREAMING_SNAKE_CASE__ )] )
bstring += tok_string
__lowerCAmelCase = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
return ()
| 330
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    """Configuration class that composes an encoder config and a decoder config into one encoder-decoder model config."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 97
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve |Z|^2 = R^2 + X^2 for whichever of the three quantities is passed as 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 550
| 0
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
a : List[str] = {"vocab_file": "spiece.model"}
a : Tuple = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class a ( _UpperCAmelCase ):
def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[int]=True , lowercase_ : Any=False , lowercase_ : Optional[int]="<s>" , lowercase_ : int="</s>" , lowercase_ : Dict="<unk>" , lowercase_ : Tuple="<sep>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : str="<cls>" , lowercase_ : Union[str, Any]="<mask>" , lowercase_ : List[Any]=["<eop>", "<eod>"] , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : int , ):
snake_case_ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
snake_case_ = 3
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
snake_case_ = jieba
snake_case_ = str.maketrans(''' \n''' , '''\u2582\u2583''' )
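        # Map spaces and newlines to the placeholder glyphs U+2582/U+2583 for SentencePiece; _decode at the bottom of the class reverses this mapping.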
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def A_ ( self : Dict ):
return len(self.sp_model )
def A_ ( self : str ):
snake_case_ = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Any , lowercase_ : Dict ):
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self : Dict , lowercase_ : Dict ):
if self.remove_space:
snake_case_ = ''' '''.join(inputs.strip().split() )
else:
snake_case_ = inputs
snake_case_ = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
snake_case_ = unicodedata.normalize('''NFKD''' , lowerCamelCase_ )
snake_case_ = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_ )] )
if self.do_lower_case:
snake_case_ = outputs.lower()
return outputs
def A_ ( self : Tuple , lowercase_ : str ):
snake_case_ = self.preprocess_text(lowerCamelCase_ )
snake_case_ = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ )
snake_case_ = []
for piece in pieces:
if len(lowerCamelCase_ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
snake_case_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ = cur_pieces[1:]
else:
snake_case_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase_ )
else:
new_pieces.append(lowerCamelCase_ )
return new_pieces
def A_ ( self : Optional[int] , lowercase_ : Optional[Any] ):
return self.sp_model.PieceToId(lowerCamelCase_ )
def A_ ( self : Tuple , lowercase_ : str ):
return self.sp_model.IdToPiece(lowerCamelCase_ )
def A_ ( self : Any , lowercase_ : Union[str, Any] ):
snake_case_ = ''''''.join(lowerCamelCase_ ).replace(lowerCamelCase_ , ''' ''' ).strip()
return out_string
def A_ ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def A_ ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1, 1]
return ([0] * len(lowerCamelCase_ )) + [1, 1]
def A_ ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
snake_case_ = [self.sep_token_id]
snake_case_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def A_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
def A_ ( self : Optional[int] , *lowercase_ : str , **lowercase_ : Dict ):
snake_case_ = super()._decode(*lowerCamelCase_ , **lowerCamelCase_ )
snake_case_ = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
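# Hedged sketch, separate from the tokenizer class above: it illustrates the
# pre-tokenization idea the class relies on -- jieba word segmentation plus a
# translation table that protects spaces and newlines ('\u2582'/'\u2583')
# before SentencePiece sees the text, reversed again at decode time.
# `jieba` is assumed to be installed, as the class itself requires; the helper
# name below is ours, not part of the original file.
def _cpm_pretokenize_example(text: str) -> str:
    import jieba  # assumed available, as required by the tokenizer above
    translator = str.maketrans(" \n", "\u2582\u2583")
    segmented = " ".join(jieba.cut(text, cut_all=False))
    return segmented.translate(translator)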
| 718
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _readaa( bytestream ):
    '''Read a big-endian uint32 from the byte stream (used by the extractors below).'''
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('''>''' )
    return numpy.frombuffer(bytestream.read(4 ), dtype=dt )[0]
@deprecated(__UpperCAmelCase, '''Please use tf.data to implement this functionality.''' )
def _extract_images( f ):
    '''Extract MNIST images into a 4D uint8 numpy array [index, y, x, depth].'''
    print('''Extracting''', f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
        num_images = _readaa(bytestream )
        rows = _readaa(bytestream )
        cols = _readaa(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf, dtype=numpy.uint8 )
        data = data.reshape(num_images, rows, cols, 1 )
        return data
@deprecated(__UpperCAmelCase, '''Please use tf.one_hot on tensors.''' )
def _dense_to_one_hot( labels_dense, num_classes ):
    '''Convert class labels from scalars to one-hot vectors.'''
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(__UpperCAmelCase, '''Please use tf.data to implement this functionality.''' )
def _extract_labels( f, one_hot=False, num_classes=10 ):
    '''Extract MNIST labels into a 1D uint8 numpy array [index].'''
    print('''Extracting''', f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
        num_items = _readaa(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf, dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels, num_classes )
        return labels
class _DataSet :
@deprecated(
lowercase_ , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Tuple=False , lowercase_ : Tuple=False , lowercase_ : Optional[Any]=dtypes.floataa , lowercase_ : Any=True , lowercase_ : Optional[int]=None , ):
snake_case_ ,snake_case_ = random_seed.get_seed(lowercase_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
snake_case_ = dtypes.as_dtype(lowercase_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
snake_case_ = 1_0000
snake_case_ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"images.shape: {images.shape} labels.shape: {labels.shape}"
snake_case_ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
snake_case_ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
snake_case_ = images.astype(numpy.floataa )
snake_case_ = numpy.multiply(lowercase_ , 1.0 / 255.0 )
snake_case_ = images
snake_case_ = labels
snake_case_ = 0
snake_case_ = 0
@property
def A_ ( self : int ):
return self._images
@property
def A_ ( self : Tuple ):
return self._labels
@property
def A_ ( self : str ):
return self._num_examples
@property
def A_ ( self : List[str] ):
return self._epochs_completed
def A_ ( self : str , lowercase_ : List[str] , lowercase_ : Optional[int]=False , lowercase_ : Dict=True ):
if fake_data:
snake_case_ = [1] * 784
snake_case_ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(lowercase_ )],
[fake_label for _ in range(lowercase_ )],
)
snake_case_ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
snake_case_ = numpy.arange(self._num_examples )
numpy.random.shuffle(lowercase_ )
snake_case_ = self.images[perma]
snake_case_ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
snake_case_ = self._num_examples - start
snake_case_ = self._images[start : self._num_examples]
snake_case_ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
snake_case_ = numpy.arange(self._num_examples )
numpy.random.shuffle(lowercase_ )
snake_case_ = self.images[perm]
snake_case_ = self.labels[perm]
# Start next epoch
snake_case_ = 0
snake_case_ = batch_size - rest_num_examples
snake_case_ = self._index_in_epoch
snake_case_ = self._images[start:end]
snake_case_ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
snake_case_ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(__UpperCAmelCase, '''Please write your own downloading logic.''' )
def _maybe_download( filename, work_directory, source_url ):
    '''Download `filename` from `source_url` into `work_directory` unless it is already present.'''
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory, filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url, filepath ) # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('''Successfully downloaded''', filename, size, '''bytes.''' )
    return filepath
@deprecated(
__UpperCAmelCase, '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=False, __UpperCAmelCase=False, __UpperCAmelCase=dtypes.floataa, __UpperCAmelCase=True, __UpperCAmelCase=5000, __UpperCAmelCase=None, __UpperCAmelCase=DEFAULT_SOURCE_URL, ) -> Tuple:
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[], [], fake_data=__UpperCAmelCase, one_hot=__UpperCAmelCase, dtype=__UpperCAmelCase, seed=__UpperCAmelCase )
snake_case_ = fake()
snake_case_ = fake()
snake_case_ = fake()
return _Datasets(train=__UpperCAmelCase, validation=__UpperCAmelCase, test=__UpperCAmelCase )
if not source_url: # empty string check
snake_case_ = DEFAULT_SOURCE_URL
snake_case_ = '''train-images-idx3-ubyte.gz'''
snake_case_ = '''train-labels-idx1-ubyte.gz'''
snake_case_ = '''t10k-images-idx3-ubyte.gz'''
snake_case_ = '''t10k-labels-idx1-ubyte.gz'''
snake_case_ = _maybe_download(
__UpperCAmelCase, __UpperCAmelCase, source_url + train_images_file )
with gfile.Open(__UpperCAmelCase, '''rb''' ) as f:
snake_case_ = _extract_images(__UpperCAmelCase )
snake_case_ = _maybe_download(
__UpperCAmelCase, __UpperCAmelCase, source_url + train_labels_file )
with gfile.Open(__UpperCAmelCase, '''rb''' ) as f:
snake_case_ = _extract_labels(__UpperCAmelCase, one_hot=__UpperCAmelCase )
snake_case_ = _maybe_download(
__UpperCAmelCase, __UpperCAmelCase, source_url + test_images_file )
with gfile.Open(__UpperCAmelCase, '''rb''' ) as f:
snake_case_ = _extract_images(__UpperCAmelCase )
snake_case_ = _maybe_download(
__UpperCAmelCase, __UpperCAmelCase, source_url + test_labels_file )
with gfile.Open(__UpperCAmelCase, '''rb''' ) as f:
snake_case_ = _extract_labels(__UpperCAmelCase, one_hot=__UpperCAmelCase )
if not 0 <= validation_size <= len(__UpperCAmelCase ):
snake_case_ = (
'''Validation size should be between 0 and '''
F"{len(__UpperCAmelCase )}. Received: {validation_size}."
)
raise ValueError(__UpperCAmelCase )
snake_case_ = train_images[:validation_size]
snake_case_ = train_labels[:validation_size]
snake_case_ = train_images[validation_size:]
snake_case_ = train_labels[validation_size:]
snake_case_ = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
snake_case_ = _DataSet(__UpperCAmelCase, __UpperCAmelCase, **__UpperCAmelCase )
snake_case_ = _DataSet(__UpperCAmelCase, __UpperCAmelCase, **__UpperCAmelCase )
snake_case_ = _DataSet(__UpperCAmelCase, __UpperCAmelCase, **__UpperCAmelCase )
return _Datasets(train=__UpperCAmelCase, validation=__UpperCAmelCase, test=__UpperCAmelCase )
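# Hedged sketch, separate from the deprecated readers above: the IDX files they
# parse start with big-endian uint32 fields (magic 2051 for images, 2049 for
# labels) followed by the item counts, which is all `_readaa` extracts from the
# header. The helper below is illustrative only and works on an in-memory stream.
def _idx_header_example() -> tuple:
    import io
    import struct
    header = struct.pack(">IIII", 2051, 2, 28, 28)  # magic, n_images, rows, cols
    stream = io.BytesIO(header)
    read_u32 = lambda s: struct.unpack(">I", s.read(4))[0]
    return tuple(read_u32(stream) for _ in range(4))  # (2051, 2, 28, 28)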
| 593
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator, batch_size: int = 16 ):
__lowerCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
__lowerCAmelCase = load_dataset('glue', 'mrpc' )
def tokenize_function(lowerCAmelCase_ : Any ):
# max_length=None => use the model max length (it's actually the default)
__lowerCAmelCase = tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowerCAmelCase_, max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCAmelCase = datasets.map(
lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['idx', 'sentence1', 'sentence2'], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCAmelCase = tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowerCAmelCase_ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
__lowerCAmelCase = 8
else:
__lowerCAmelCase = None
return tokenizer.pad(
lowerCAmelCase_, padding='longest', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='pt', )
# Instantiate dataloaders.
__lowerCAmelCase = DataLoader(
tokenized_datasets['train'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
__lowerCAmelCase = DataLoader(
tokenized_datasets['validation'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case : Union[str, Any] = mocked_dataloaders # noqa: F811
def training_function( config, args ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', lowerCAmelCase_ ) == "1":
__lowerCAmelCase = 2
# Initialize accelerator
__lowerCAmelCase = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase = config['lr']
__lowerCAmelCase = int(config['num_epochs'] )
__lowerCAmelCase = int(config['seed'] )
__lowerCAmelCase = int(config['batch_size'] )
__lowerCAmelCase = evaluate.load('glue', 'mrpc' )
# If the batch size is too big we use gradient accumulation
__lowerCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
__lowerCAmelCase = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = get_dataloaders(lowerCAmelCase_, lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase = AdamW(params=model.parameters(), lr=lowerCAmelCase_ )
# Instantiate scheduler
__lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_, num_warmup_steps=100, num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = outputs.loss
__lowerCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
__lowerCAmelCase = 0
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = outputs.logits.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowerCAmelCase_ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
__lowerCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowerCAmelCase_, references=lowerCAmelCase_, )
__lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""", lowerCAmelCase_ )
def main( ):
__lowerCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowerCAmelCase_, lowerCAmelCase_ )
if __name__ == "__main__":
main()
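# Hedged sketch of the truncation idea flagged as "New Code" above: when a
# distributed sampler pads the last batch so every rank gets the same number of
# samples, the gathered predictions must be cut back to the true dataset size
# before computing metrics. Plain-Python stand-ins below; the helper name and
# numbers are ours, not part of the training script.
def _truncate_last_batch_example():
    dataset_size = 10
    gathered = list(range(12))          # 12 gathered predictions, 2 are padding
    samples_seen = 8                    # samples accounted for before the last batch
    last_batch = gathered[samples_seen:]
    last_batch = last_batch[: dataset_size - samples_seen]  # drop the padding
    return gathered[:samples_seen] + last_batch             # 10 real predictions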
| 53
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester :
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=1_0 , __magic_name__=3 , __magic_name__=2 , __magic_name__=2 , __magic_name__=2 , __magic_name__=True , __magic_name__=True , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0 , __magic_name__=0.02 , __magic_name__=0.9 , __magic_name__=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = patch_size
_lowerCAmelCase = tubelet_size
_lowerCAmelCase = num_frames
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = mask_ratio
_lowerCAmelCase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_lowerCAmelCase = int(mask_ratio * self.seq_length )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = VideoMAEModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = VideoMAEForPreTraining(__magic_name__ )
model.to(__magic_name__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowerCAmelCase = torch.ones((self.num_masks,) )
_lowerCAmelCase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_lowerCAmelCase = mask.expand(self.batch_size , -1 ).bool()
_lowerCAmelCase = model(__magic_name__ , __magic_name__ )
# model only returns predictions for masked patches
_lowerCAmelCase = mask.sum().item()
_lowerCAmelCase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : List[str] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
UpperCamelCase : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Dict = False
UpperCamelCase : int = False
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : List[str] = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = VideoMAEModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=3_7 )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
"""simple docstring"""
_lowerCAmelCase = copy.deepcopy(__magic_name__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowerCAmelCase = torch.ones((self.model_tester.num_masks,) )
_lowerCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_lowerCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool()
_lowerCAmelCase = bool_masked_pos.to(__magic_name__ )
if return_labels:
if model_class in [
*get_values(__magic_name__ ),
]:
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = VideoMAEModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
_lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_lowerCAmelCase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_lowerCAmelCase = len(__magic_name__ )
# Check attention is always last and order is fine
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 1 , len(__magic_name__ ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
_lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_lowerCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def prepare_video():
    '''Load the sample video used by the integration tests below.'''
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
__magic_name__ )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_video()
_lowerCAmelCase = image_processor(__magic_name__ , return_tensors='pt' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**__magic_name__ )
# verify the logits
_lowerCAmelCase = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
_lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(__magic_name__ )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_video()
_lowerCAmelCase = image_processor(__magic_name__ , return_tensors='pt' ).to(__magic_name__ )
# add boolean mask, indicating which patches to mask
_lowerCAmelCase = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_lowerCAmelCase = torch.load(__magic_name__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**__magic_name__ )
# verify the logits
_lowerCAmelCase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
_lowerCAmelCase = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=__magic_name__ )
self.assertEqual(outputs.logits.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_lowerCAmelCase = torch.tensor([0.51_42] , device=__magic_name__ )
self.assertTrue(torch.allclose(outputs.loss , __magic_name__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_lowerCAmelCase = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=__magic_name__ ).to(
__magic_name__ )
with torch.no_grad():
_lowerCAmelCase = model(**__magic_name__ )
_lowerCAmelCase = torch.tensor(torch.tensor([0.64_69] ) , device=__magic_name__ )
self.assertTrue(torch.allclose(outputs.loss , __magic_name__ , atol=1e-4 ) )
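# Hedged sketch (torch assumed available, as in the tests above) of how a shared
# `bool_masked_pos` is built for VideoMAEForPreTraining: one fixed mask with
# `num_masks` ones padded with zeros up to the sequence length, then repeated
# for every example in the batch. The sizes below are illustrative only.
def _bool_masked_pos_example():
    import torch
    batch_size, seq_length, num_masks = 2, 16, 12
    mask = torch.ones((num_masks,))
    mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
    return mask.expand(batch_size, -1).bool()  # shape (2, 16)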
| 589
| 0
|
'''simple docstring'''
def sum_digits( num ):
    '''Return the sum of the decimal digits of `num`.'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 1_0
        num //= 1_0
    return digit_sum
def solution( max_n = 1_0_0 ):
    '''Sum the digits of the numerator of the `max_n`-th convergent of the continued fraction of e.'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"""{solution() = }""")
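# Hedged worked example of the digit-sum step above: the 10th convergent of e
# has numerator 1457, and summing its digits gives 1 + 4 + 5 + 7 = 17, the
# value quoted in the original problem statement. The helper name is ours.
def _digit_sum_example(num: int = 1457) -> int:
    total = 0
    while num > 0:
        total += num % 10
        num //= 10
    return total  # 17 for 1457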
| 713
|
'''simple docstring'''
import os
from math import log10
def solution( data_file = "base_exp.txt" ):
    '''Return the 1-based line number whose base**exponent pair is largest, compared via exponent * log10(base).'''
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        base , exponent = list(map(int , line.split("," ) ) )
        if exponent * log10(base ) > largest:
            largest = exponent * log10(base )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
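# Hedged illustration of the comparison trick used above: a**b is far too large
# to evaluate directly for the puzzle inputs, but b * log10(a) preserves the
# ordering, so only logarithms need to be compared. The sample numbers are ours.
def _log_compare_example() -> bool:
    from math import log10
    # 2**1000 vs 3**600: compare 1000*log10(2) with 600*log10(3)
    return 1000 * log10(2) > 600 * log10(3)  # True: 2**1000 is larger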
| 41
| 0
|
from __future__ import annotations
import math
a__ : Tuple = '2020.9.26'
a__ : Optional[int] = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad( x: float , y: float , z: float , scale: float , distance: float ) -> tuple[float, float]:
    '''Project a 3D point onto a 2D plane using a simple perspective projection.'''
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate( x: float , y: float , z: float , axis: str , angle: float ) -> tuple[float, float, float]:
    '''Rotate a 3D point around the given axis by the given angle.'''
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'{list(input_variables.values() )}'
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
print(f'''{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }''')
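# Hedged numeric check of the projection formula above: with x=1, y=2, z=3,
# scale=10 and distance=10 the projected point is ((1*10)/(3+10))*10 ~ 7.69 and
# ((2*10)/(3+10))*10 ~ 15.38, the same values the print statement above reports.
def _projection_example() -> tuple:
    x, y, z, scale, distance = 1.0, 2.0, 3.0, 10.0, 10.0
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y  # (7.6923..., 15.3846...)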
| 188
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : str = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 188
| 1
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def A_ ( snake_case , snake_case , snake_case , snake_case ):
if isinstance(snake_case , snake_case ):
SCREAMING_SNAKE_CASE:List[Any] = np.full((len(snake_case ), sequence_length, 2) , snake_case )
else:
SCREAMING_SNAKE_CASE:str = np.full((len(snake_case ), sequence_length) , snake_case )
for i, tensor in enumerate(snake_case ):
if padding_side == "right":
if isinstance(snake_case , snake_case ):
SCREAMING_SNAKE_CASE:Any = tensor[:sequence_length]
else:
SCREAMING_SNAKE_CASE:str = tensor[:sequence_length]
else:
if isinstance(snake_case , snake_case ):
SCREAMING_SNAKE_CASE:Any = tensor[:sequence_length]
else:
SCREAMING_SNAKE_CASE:str = tensor[:sequence_length]
return out_tensor.tolist()
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Union[str, Any] = ord(snake_case )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
SCREAMING_SNAKE_CASE:Tuple = unicodedata.category(snake_case )
if cat.startswith("P" ):
return True
return False
@dataclass
class _snake_case ( _a ):
_A : PreTrainedTokenizerBase
_A : Union[bool, str, PaddingStrategy] = True
_A : Optional[int] = None
_A : Optional[int] = None
_A : int = -1_0_0
_A : str = "pt"
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ):
import torch
SCREAMING_SNAKE_CASE:List[str] = "label" if "label" in features[0].keys() else "labels"
SCREAMING_SNAKE_CASE:List[str] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
SCREAMING_SNAKE_CASE:Tuple = self.tokenizer.pad(
SCREAMING_SNAKE_CASE__ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="pt" if labels is None else None ,)
if labels is None:
return batch
SCREAMING_SNAKE_CASE:str = torch.tensor(batch["entity_ids"] ).shape[1]
SCREAMING_SNAKE_CASE:Optional[Any] = self.tokenizer.padding_side
if padding_side == "right":
SCREAMING_SNAKE_CASE:Union[str, Any] = [
list(SCREAMING_SNAKE_CASE__ ) + [self.label_pad_token_id] * (sequence_length - len(SCREAMING_SNAKE_CASE__ )) for label in labels
]
else:
SCREAMING_SNAKE_CASE:Dict = [
[self.label_pad_token_id] * (sequence_length - len(SCREAMING_SNAKE_CASE__ )) + list(SCREAMING_SNAKE_CASE__ ) for label in labels
]
SCREAMING_SNAKE_CASE:str = [feature["ner_tags"] for feature in features]
SCREAMING_SNAKE_CASE:Tuple = padding_tensor(SCREAMING_SNAKE_CASE__ ,-1 ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = [feature["original_entity_spans"] for feature in features]
SCREAMING_SNAKE_CASE:Union[str, Any] = padding_tensor(SCREAMING_SNAKE_CASE__ ,(-1, -1) ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = {k: torch.tensor(SCREAMING_SNAKE_CASE__ ,dtype=torch.intaa ) for k, v in batch.items()}
return batch
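# Hedged usage sketch for `padding_tensor` above: ragged per-example spans are
# written into a constant-filled buffer of shape (batch, sequence_length, 2)
# (or (batch, sequence_length) for scalar tags) and truncated or padded to a
# common length. The helper name and sample values below are illustrative only.
def _padding_tensor_example():
    import numpy as np
    sequence_length, pad_value = 4, -1
    spans = [[(0, 2), (3, 5)], [(1, 4)]]          # ragged original_entity_spans
    out = np.full((len(spans), sequence_length, 2), pad_value)
    for i, tensor in enumerate(spans):
        out[i, : len(tensor)] = np.asarray(tensor)[:sequence_length]
    return out.tolist()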
| 717
|
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
A_ = "true"
def A_ ( snake_case , snake_case=82 , snake_case=16 ):
set_seed(42 )
SCREAMING_SNAKE_CASE:Optional[int] = RegressionModel()
SCREAMING_SNAKE_CASE:int = deepcopy(snake_case )
SCREAMING_SNAKE_CASE:List[str] = RegressionDataset(length=snake_case )
SCREAMING_SNAKE_CASE:int = DataLoader(snake_case , batch_size=snake_case )
model.to(accelerator.device )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = accelerator.prepare(snake_case , snake_case )
return model, ddp_model, dataloader
def A_ ( snake_case , snake_case=False ):
SCREAMING_SNAKE_CASE:List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
SCREAMING_SNAKE_CASE:Dict = load_dataset("glue" , "mrpc" , split="validation" )
def tokenize_function(snake_case ):
SCREAMING_SNAKE_CASE:Optional[int] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case , max_length=snake_case )
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE:int = dataset.map(
snake_case , batched=snake_case , remove_columns=["idx", "sentence1", "sentence2"] , )
SCREAMING_SNAKE_CASE:Union[str, Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(snake_case ):
if use_longest:
return tokenizer.pad(snake_case , padding="longest" , return_tensors="pt" )
return tokenizer.pad(snake_case , padding="max_length" , max_length=128 , return_tensors="pt" )
return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 )
def A_ ( snake_case , snake_case ):
SCREAMING_SNAKE_CASE:Optional[int] = Accelerator(dispatch_batches=snake_case , split_batches=snake_case )
SCREAMING_SNAKE_CASE:List[Any] = get_dataloader(snake_case , not dispatch_batches )
SCREAMING_SNAKE_CASE:Tuple = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" , return_dict=snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[str] = accelerator.prepare(snake_case , snake_case )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def A_ ( snake_case , snake_case , snake_case ):
SCREAMING_SNAKE_CASE:str = []
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Tuple = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE:int = model(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[str] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Union[str, Any] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case )
targs.append(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = torch.cat(snake_case ), torch.cat(snake_case )
return logits, targs
def A_ ( snake_case , snake_case=82 , snake_case=False , snake_case=False , snake_case=16 ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = get_basic_setup(snake_case , snake_case , snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = generate_predictions(snake_case , snake_case , snake_case )
assert (
len(snake_case ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}'''
def A_ ( snake_case = False , snake_case = False ):
SCREAMING_SNAKE_CASE:List[Any] = evaluate.load("glue" , "mrpc" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = get_mrpc_setup(snake_case , snake_case )
# First do baseline
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Tuple = setup["no"]
model.to(snake_case )
model.eval()
for batch in dataloader:
batch.to(snake_case )
with torch.inference_mode():
SCREAMING_SNAKE_CASE:Dict = model(**snake_case )
SCREAMING_SNAKE_CASE:Any = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case , references=batch["labels"] )
SCREAMING_SNAKE_CASE:Union[str, Any] = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Tuple = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE:Any = model(**snake_case )
SCREAMING_SNAKE_CASE:Optional[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE:List[str] = batch["labels"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case , references=snake_case )
SCREAMING_SNAKE_CASE:Any = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def A_ ( ):
SCREAMING_SNAKE_CASE:Optional[int] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(snake_case , snake_case )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE:Optional[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(snake_case , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
SCREAMING_SNAKE_CASE:List[str] = Accelerator()
test_torch_metrics(snake_case , 512 )
accelerator.state._reset_state()
def A_ ( snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 465
| 0
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str=2 , _UpperCamelCase : Dict=8 , _UpperCamelCase : str=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Any=True , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[int]=9_9 , _UpperCamelCase : Any=1_6 , _UpperCamelCase : List[str]=5 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : str=3_6 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : str=0.0 , _UpperCamelCase : Any=5_1_2 , _UpperCamelCase : int=1_6 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : Tuple=0.02 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=4 , _UpperCamelCase : Dict=None , ) ->Optional[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def snake_case__( self : List[Any] ) ->List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : Optional[int] ) ->Optional[Any]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def snake_case__( self : List[Any] ) ->str:
        config = self.get_config()
        config.vocab_size = 3_0_0
return config
def snake_case__( self : List[str] ) ->Optional[Any]:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case__( self : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : Dict ) ->Dict:
snake_case_ = MraModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
snake_case_ = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , ) ->Optional[Any]:
snake_case_ = True
snake_case_ = MraModel(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , )
snake_case_ = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , )
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] ) ->str:
snake_case_ = MraForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->Any:
snake_case_ = MraForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : List[str] ) ->Optional[Any]:
snake_case_ = self.num_labels
snake_case_ = MraForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) ->int:
snake_case_ = self.num_labels
snake_case_ = MraForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) ->Any:
snake_case_ = self.num_choices
snake_case_ = MraForMultipleChoice(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
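        # The single-example inputs are repeated along a new `num_choices` dimension so that the
        # multiple-choice head scores every candidate choice against the same batch of sequences.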
snake_case_ = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : List[str] = ()
def snake_case__( self : List[str] ) ->Optional[int]:
snake_case_ = MraModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def snake_case__( self : Dict ) ->Optional[int]:
self.config_tester.run_common_tests()
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : int ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def snake_case__( self : List[str] ) ->Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase )
def snake_case__( self : Any ) ->Optional[int]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
def snake_case__( self : str ) ->List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def snake_case__( self : int ) ->Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : int ) ->str:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = MraModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def snake_case__( self : Union[str, Any] ) ->Tuple:
return
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : List[Any] ) ->Optional[Any]:
snake_case_ = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
snake_case_ = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : List[str] ) ->int:
snake_case_ = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
snake_case_ = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 5_0_2_6_5
snake_case_ = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case__( self : Union[str, Any] ) ->Any:
snake_case_ = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
snake_case_ = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = 5_0_2_6_5
snake_case_ = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
| 39
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the embedded profile JSON out of one of the page's <script> tags."""
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__( self , username : str ):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json( self ) -> dict:
        """Fetch the profile page and return the embedded user data as a dict."""
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self : Union[str, Any] ):
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : str ):
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _snake_case ( self : Optional[int] ):
return self.user_data["username"]
@property
def _snake_case ( self : List[Any] ):
return self.user_data["full_name"]
@property
def _snake_case ( self : List[str] ):
return self.user_data["biography"]
@property
def _snake_case ( self : Tuple ):
return self.user_data["business_email"]
@property
def _snake_case ( self : Optional[Any] ):
return self.user_data["external_url"]
@property
def _snake_case ( self : int ):
return self.user_data["edge_followed_by"]["count"]
@property
def _snake_case ( self : List[str] ):
return self.user_data["edge_follow"]["count"]
@property
def _snake_case ( self : List[Any] ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _snake_case ( self : Any ):
return self.user_data["profile_pic_url_hd"]
@property
def _snake_case ( self : Optional[int] ):
return self.user_data["is_verified"]
@property
def _snake_case ( self : Dict ):
return self.user_data["is_private"]
def __a ( A__ : str = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE = InstagramUser(A__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , A__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }')
| 16
| 0
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single-column 2-D array."""
    return input_array.reshape((input_array.size, 1) )
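# For example, column_reshape(np.array([1, 2, 3])) returns [[1], [2], [3]] with shape (3, 1),
# which is the column-vector form the outer-product style np.dot calls below expect.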
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the within-class (intra-class) covariance matrix."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
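# Mathematically this is the pooled within-class scatter,
#   S_w = (1 / N) * sum_i sum_{x in class i} (x - mu_i)(x - mu_i)^T,
# where mu_i is the mean of class i and N is the total number of samples (columns of `features`).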
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the between-class (inter-class) covariance matrix."""
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
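# Mathematically this is the between-class scatter,
#   S_b = (1 / N) * sum_i n_i (mu_i - mu)(mu_i - mu)^T,
# where n_i is the number of samples in class i and mu is the overall mean of the dataset.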
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then take only the first `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , centered_data )
        logging.info("""Principal Component Analysis computed""" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=True )
        logging.error("""Dataset empty""" )
        raise AssertionError
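# A minimal usage sketch (assuming `features` is an (n_features, n_samples) array):
#   projected = principal_component_analysis(features, dimensions=2)
# `projected` then has shape (2, n_samples): each sample expressed along the top-2 principal directions.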
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # The projection can have at most `classes - 1` meaningful directions
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("""Linear Discriminant Analysis computed""" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=True )
        logging.error("""Dataset empty""" )
        raise AssertionError
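# LDA here solves the generalized eigenproblem S_b v = lambda * S_w v (scipy.linalg.eigh with two
# matrices) and keeps the `dimensions` leading eigenvectors; the assert above reflects that at most
# `classes - 1` discriminant directions carry information.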
def _UpperCamelCase ( ):
# Create dummy dataset with 2 classes and 3 features
UpperCAmelCase__ : Union[str, Any] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCAmelCase__ : str = np.array([0, 0, 0, 1, 1] )
UpperCAmelCase__ : int = 2
UpperCAmelCase__ : Optional[int] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase__ ) as error_info:
UpperCAmelCase__ : Optional[Any] = linear_discriminant_analysis(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if isinstance(UpperCamelCase__ , np.ndarray ):
raise AssertionError(
"""Did not raise AssertionError for dimensions > classes""" )
assert error_info.type is AssertionError
def _UpperCamelCase ( ):
UpperCAmelCase__ : List[str] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCAmelCase__ : Optional[Any] = 2
UpperCAmelCase__ : Optional[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase__ ) as error_info:
UpperCAmelCase__ : Union[str, Any] = principal_component_analysis(UpperCamelCase__ , UpperCamelCase__ )
if not np.allclose(UpperCamelCase__ , UpperCamelCase__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 113
|
'''simple docstring'''
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk(fp: str, tp: str) -> None:
    print("""moving disk from""" , fp , """to""" , tp )
def main() -> None:
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
| 113
| 1
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this one, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__A = """facebook/wmt19-en-de"""
__A = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__A = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__A = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
__A = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
__A = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
__A = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 93
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
__A = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__A = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :Optional[str] = field(
default=a , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(a )} , )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """The input training data file (a text file)."""} )
__magic_name__ :Optional[str] = field(
default=a , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
__magic_name__ :bool = field(
default=a , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
__magic_name__ :bool = field(
default=a , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
__magic_name__ :bool = field(default=a , metadata={"""help""": """Whether ot not to use whole word mask."""} )
__magic_name__ :float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
__magic_name__ :float = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
__magic_name__ :int = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
__magic_name__ :int = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
__magic_name__ :bool = field(
default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) ->Optional[int]:
"""simple docstring"""
def _dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=_SCREAMING_SNAKE_CASE , file_path=_SCREAMING_SNAKE_CASE , block_size=args.block_size , ref_path=_SCREAMING_SNAKE_CASE , )
return LineByLineTextDataset(tokenizer=_SCREAMING_SNAKE_CASE , file_path=_SCREAMING_SNAKE_CASE , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_SCREAMING_SNAKE_CASE , file_path=_SCREAMING_SNAKE_CASE , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_SCREAMING_SNAKE_CASE , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_SCREAMING_SNAKE_CASE ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
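# Dataset selection above: evaluation always uses the single eval file (with its optional ref file);
# training uses either a glob of files (concatenated, without ref files) or a single train file with
# an optional whole-word-mask ref file.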
def __A () ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowerCAmelCase__ :Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase__ :List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
lowerCAmelCase__ :List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
lowerCAmelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase__ :str = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
lowerCAmelCase__ :Optional[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
lowerCAmelCase__ :int = AutoModelWithLMHead.from_config(_SCREAMING_SNAKE_CASE )
model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            ' --mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
lowerCAmelCase__ :Dict = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowerCAmelCase__ :Any = min(data_args.block_size , tokenizer.max_len )
# Get datasets
lowerCAmelCase__ :List[str] = (
get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowerCAmelCase__ :Optional[int] = (
get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , evaluate=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowerCAmelCase__ :str = DataCollatorForPermutationLanguageModeling(
tokenizer=_SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowerCAmelCase__ :Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
else:
lowerCAmelCase__ :str = DataCollatorForLanguageModeling(
tokenizer=_SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCAmelCase__ :Tuple = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , prediction_loss_only=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowerCAmelCase__ :Tuple = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_SCREAMING_SNAKE_CASE )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase__ :Optional[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase__ :Any = trainer.evaluate()
lowerCAmelCase__ :Optional[Any] = math.exp(eval_output['eval_loss'] )
lowerCAmelCase__ :Dict = {'perplexity': perplexity}
lowerCAmelCase__ :List[Any] = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(_SCREAMING_SNAKE_CASE )
return results
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 93
| 1
|
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: complex, b: complex, c: complex) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero." )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
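# A quick worked example: for 5x^2 + 6x + 1 = 0 the discriminant is 36 - 20 = 16, so the roots are
# (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0 -- exactly what main() below prints.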
def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(f"The solutions are: {solution_1} and {solution_2}" )
if __name__ == "__main__":
    main()
| 704
|
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr ):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
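# The stack stays ordered (smallest on top) while scanning right-to-left, so every element is pushed
# and popped at most once: O(n) overall, versus the O(n^2) nested-loop variants above.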
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        """from __main__ import arr, next_greatest_element_slow, """
        """next_greatest_element_fast, next_greatest_element"""
    )
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 306
| 0
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''pixel_values''']
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : Tuple=PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
'''simple docstring'''
__a : Union[str, Any] = do_resize
__a : Optional[Any] = do_rescale
__a : Union[str, Any] = size_divisor
__a : str = resample
super().__init__(**SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[ChannelDimension] = None , **SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a , __a : List[str] = get_image_size(SCREAMING_SNAKE_CASE__ )
# Rounds the height and width down to the closest multiple of size_divisor
__a : List[str] = height // size_divisor * size_divisor
__a : List[Any] = width // size_divisor * size_divisor
__a : Optional[Any] = resize(SCREAMING_SNAKE_CASE__ , (new_h, new_w) , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return image
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Optional[ChannelDimension] = None , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[TensorType, str]] = None , SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : str , ):
'''simple docstring'''
__a : Optional[int] = do_resize if do_resize is not None else self.do_resize
__a : str = do_rescale if do_rescale is not None else self.do_rescale
__a : Optional[Any] = size_divisor if size_divisor is not None else self.size_divisor
__a : Optional[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
__a : Union[str, Any] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
__a : Optional[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for img in images]
if do_resize:
__a : Union[str, Any] = [self.resize(SCREAMING_SNAKE_CASE__ , size_divisor=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
__a : Tuple = [self.rescale(SCREAMING_SNAKE_CASE__ , scale=1 / 2_5_5 ) for image in images]
__a : Tuple = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
__a : Dict = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
| 47
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = '''sew-d'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : str="layer_norm" , SCREAMING_SNAKE_CASE__ : Tuple="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-7 , SCREAMING_SNAKE_CASE__ : Any=1e-5 , SCREAMING_SNAKE_CASE__ : Optional[int]="group" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=2_5_6 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = hidden_size
__a : Optional[Any] = feat_extract_norm
__a : List[str] = feat_extract_activation
__a : Dict = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
__a : List[str] = list(SCREAMING_SNAKE_CASE__ )
__a : int = conv_bias
__a : Tuple = num_conv_pos_embeddings
__a : List[str] = num_conv_pos_embedding_groups
__a : Optional[Any] = len(self.conv_dim )
__a : Union[str, Any] = num_hidden_layers
__a : Optional[Any] = intermediate_size
__a : Union[str, Any] = squeeze_factor
__a : List[Any] = max_position_embeddings
__a : Tuple = position_buckets
__a : Optional[int] = share_att_key
__a : List[str] = relative_attention
__a : Any = norm_rel_ebd
__a : Any = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = hidden_act
__a : str = num_attention_heads
__a : Union[str, Any] = hidden_dropout
__a : Optional[int] = attention_dropout
__a : List[str] = activation_dropout
__a : int = feat_proj_dropout
__a : int = final_dropout
__a : Dict = layer_norm_eps
__a : Tuple = feature_layer_norm_eps
__a : str = initializer_range
__a : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) '''
                f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a : Tuple = apply_spec_augment
__a : Optional[Any] = mask_time_prob
__a : Any = mask_time_length
__a : List[str] = mask_time_min_masks
__a : List[str] = mask_feature_prob
__a : Tuple = mask_feature_length
__a : Any = mask_feature_min_masks
# ctc loss
__a : Optional[int] = ctc_loss_reduction
__a : List[Any] = ctc_zero_infinity
# sequence classification
__a : Dict = use_weighted_layer_sum
__a : Optional[Any] = classifier_proj_size
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
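        # The feature encoder downsamples raw audio by the product of all convolutional strides,
        # so this property gives the number of input samples per output frame.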
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 47
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _a ( unittest.TestCase):
"""simple docstring"""
def __init__( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict=7 , __lowerCamelCase: int=3 , __lowerCamelCase: Optional[int]=18 , __lowerCamelCase: str=30 , __lowerCamelCase: Tuple=400 , __lowerCamelCase: Any=True , __lowerCamelCase: List[str]=None , __lowerCamelCase: List[Any]=True , __lowerCamelCase: List[Any]=False , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=True , __lowerCamelCase: Dict=[0.5, 0.5, 0.5] , __lowerCamelCase: List[str]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCamelCase__: Tuple = parent
UpperCamelCase__: Optional[Any] = batch_size
UpperCamelCase__: Optional[int] = num_channels
UpperCamelCase__: Optional[Any] = image_size
UpperCamelCase__: str = min_resolution
UpperCamelCase__: List[Any] = max_resolution
UpperCamelCase__: List[str] = do_resize
UpperCamelCase__: Dict = size if size is not None else {"height": 18, "width": 20}
UpperCamelCase__: Union[str, Any] = do_thumbnail
UpperCamelCase__: Optional[int] = do_align_axis
UpperCamelCase__: Any = do_pad
UpperCamelCase__: Optional[int] = do_normalize
UpperCamelCase__: Optional[int] = image_mean
UpperCamelCase__: Any = image_std
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _a ( UpperCamelCase__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Dict = DonutImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_thumbnail" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_align_long_axis" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 20} )
UpperCamelCase__: Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase__: int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"height": 84, "width": 42} )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@is_flaky()
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase__: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase__: Dict = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__: Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase__: List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase__: Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase__: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase__: Any = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 221
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A__: Union[str, Any] = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """albert"""
def __init__( self: Dict , __lowerCamelCase: int=3_0000 , __lowerCamelCase: Dict=128 , __lowerCamelCase: Optional[int]=4096 , __lowerCamelCase: Optional[int]=12 , __lowerCamelCase: List[Any]=1 , __lowerCamelCase: List[Any]=64 , __lowerCamelCase: Optional[Any]=1_6384 , __lowerCamelCase: int=1 , __lowerCamelCase: List[str]="gelu_new" , __lowerCamelCase: Optional[int]=0 , __lowerCamelCase: Optional[Any]=0 , __lowerCamelCase: Union[str, Any]=512 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: Any=1e-12 , __lowerCamelCase: int=0.1 , __lowerCamelCase: Dict="absolute" , __lowerCamelCase: List[str]=0 , __lowerCamelCase: Optional[Any]=2 , __lowerCamelCase: Dict=3 , **__lowerCamelCase: int , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
UpperCamelCase__: Any = vocab_size
UpperCamelCase__: str = embedding_size
UpperCamelCase__: Optional[Any] = hidden_size
UpperCamelCase__: Any = num_hidden_layers
UpperCamelCase__: str = num_hidden_groups
UpperCamelCase__: int = num_attention_heads
UpperCamelCase__: Union[str, Any] = inner_group_num
UpperCamelCase__: str = hidden_act
UpperCamelCase__: Tuple = intermediate_size
UpperCamelCase__: Dict = hidden_dropout_prob
UpperCamelCase__: List[Any] = attention_probs_dropout_prob
UpperCamelCase__: List[str] = max_position_embeddings
UpperCamelCase__: Optional[Any] = type_vocab_size
UpperCamelCase__: Any = initializer_range
UpperCamelCase__: int = layer_norm_eps
UpperCamelCase__: List[str] = classifier_dropout_prob
UpperCamelCase__: str = position_embedding_type
class _a ( UpperCamelCase__):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__: Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase__: Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 221
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a : Tuple = {
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : str = ["""Mask2FormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Dict = [
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
__a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 606
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def _UpperCAmelCase ( A , A=7 ):
'''simple docstring'''
UpperCAmelCase__ =None
if token is not None:
UpperCAmelCase__ ={"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
UpperCAmelCase__ ="636036"
UpperCAmelCase__ =F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
UpperCAmelCase__ =requests.get(A , headers=A ).json()
return result["workflow_runs"]
def _UpperCAmelCase ( A ):
'''simple docstring'''
UpperCAmelCase__ =get_daily_ci_runs(A )
UpperCAmelCase__ =None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
UpperCAmelCase__ =workflow_run["id"]
break
return workflow_run_id
def _UpperCAmelCase ( A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =get_last_daily_ci_runs(A )
if workflow_run_id is not None:
UpperCAmelCase__ =get_artifacts_links(worflow_run_id=A , token=A )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
UpperCAmelCase__ =artifacts_links[artifact_name]
download_artifact(
artifact_name=A , artifact_url=A , output_dir=A , token=A )
def _UpperCAmelCase ( A , A , A ):
'''simple docstring'''
get_last_daily_ci_artifacts(A , A , A )
UpperCAmelCase__ ={}
for artifact_name in artifact_names:
UpperCAmelCase__ =os.path.join(A , F"""{artifact_name}.zip""" )
if os.path.isfile(A ):
UpperCAmelCase__ ={}
with zipfile.ZipFile(A ) as z:
for filename in z.namelist():
if not os.path.isdir(A ):
# read the file
with z.open(A ) as f:
UpperCAmelCase__ =f.read().decode("UTF-8" )
return results
| 625
| 0
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (given as a string) starting from `starting_point`."""
    symbol = symbols(variable )
    func = lambdify(symbol , function )
    diff_function = lambdify(symbol , diff(function , symbol ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("""Could not find root""" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
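# The `multiplicity` argument implements the modified Newton step x_{n+1} = x_n - m * f(x_n) / f'(x_n),
# which restores fast convergence when the root being sought has multiplicity m.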
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 351
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
UpperCAmelCase = False
UpperCAmelCase = False
def A_ ( __a : Namespace ):
"""simple docstring"""
return TrainCommand(__a )
class __snake_case ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
@staticmethod
def _a ( a_ ):
        train_parser = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
        train_parser.add_argument(
            """--train_data""" , type=str , required=True , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
        train_parser.add_argument(
            """--column_label""" , type=int , default=0 , help="""Column of the dataset csv file with example labels.""" )
        train_parser.add_argument(
            """--column_text""" , type=int , default=1 , help="""Column of the dataset csv file with example texts.""" )
        train_parser.add_argument(
            """--column_id""" , type=int , default=2 , help="""Column of the dataset csv file with example ids.""" )
        train_parser.add_argument(
            """--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
        train_parser.add_argument("""--validation_data""" , type=str , default="""""" , help="""path to validation dataset.""" )
        train_parser.add_argument(
            """--validation_split""" , type=float , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
        train_parser.add_argument("""--output""" , type=str , default="""./""" , help="""path to saved the trained model.""" )
        train_parser.add_argument(
            """--task""" , type=str , default="""text_classification""" , help="""Task to train the model on.""" )
        train_parser.add_argument(
            """--model""" , type=str , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
        train_parser.add_argument("""--train_batch_size""" , type=int , default=32 , help="""Batch size for training.""" )
        train_parser.add_argument("""--valid_batch_size""" , type=int , default=64 , help="""Batch size for validation.""" )
        train_parser.add_argument("""--learning_rate""" , type=float , default=3E-5 , help="""Learning rate.""" )
        train_parser.add_argument("""--adam_epsilon""" , type=float , default=1E-08 , help="""Epsilon for Adam optimizer.""" )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args ):
        self.logger = logging.get_logger("""transformers-cli/training""" )
        self.framework = """tf""" if is_tf_available() else """torch"""
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F'''Loading dataset from {args.train_data}''' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        raise NotImplementedError
    def run_tf( self ):
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
| 351
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Optional[Any] = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'vit_mae'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 51
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    """simple docstring"""
    iam_client = botoa.client("""iam""" )
    trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(trust_policy , indent=2 ) )
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
            RoleName=role_name , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name):
    """simple docstring"""
    iam_client = botoa.client("""iam""" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =_ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , )
__UpperCAmelCase =None
if credentials_configuration == 0:
__UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__UpperCAmelCase =aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__UpperCAmelCase =_ask_field("""AWS Access Key ID: """ )
__UpperCAmelCase =aws_access_key_id
__UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ )
__UpperCAmelCase =aws_secret_access_key
__UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__UpperCAmelCase =aws_region
__UpperCAmelCase =_ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , )
if role_management == 0:
__UpperCAmelCase =_ask_field("""Enter your IAM role name: """ )
else:
__UpperCAmelCase ="""accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
__UpperCAmelCase =_ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_custom_docker_image:
__UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() )
__UpperCAmelCase =_ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_inputs_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_metrics_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__UpperCAmelCase ={}
__UpperCAmelCase =_ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__UpperCAmelCase ="""dynamo_"""
__UpperCAmelCase =_ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__UpperCAmelCase =_ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__UpperCAmelCase =_ask_options(
"""Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , )
__UpperCAmelCase =_ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase ="""Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__UpperCAmelCase =_ask_options(
A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" )
__UpperCAmelCase =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCAmelCase =_ask_field(
"""How many machines do you want use? [1]: """ , A_ , default=1 , )
__UpperCAmelCase =_ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
| 68
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
A : List[Any] = logging.get_logger(__name__)
A : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
A : Tuple = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
A : Union[str, Any] = {
"""yjernite/retribert-base-uncased""": 512,
}
A : Tuple = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> Dict:
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 163
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ) -> Tuple:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
"""simple docstring"""
__lowercase = FlaxViTModel(config=lowerCamelCase__ )
__lowercase = model(lowerCamelCase__ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__lowercase = (self.image_size, self.image_size)
__lowercase = (self.patch_size, self.patch_size)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = FlaxViTForImageClassification(config=lowerCamelCase__ )
__lowercase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = FlaxViTForImageClassification(lowerCamelCase__ )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(lowerCamelCase__ )
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
        ) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE( __A , unittest.TestCase ):
snake_case_ : Optional[Any] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def snake_case__ ( self ) -> None:
"""simple docstring"""
__lowercase = FlaxViTModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def snake_case__ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase__ )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , **lowerCamelCase__ ):
return model(pixel_values=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest("""JIT Enabled""" ):
__lowercase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowercase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__lowercase = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 163
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = 'convbert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ])
| 12
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''', type=int, default=1, help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''', type=str, help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ), )
    # rest from the training program
    parser.add_argument('''training_script_args''', nargs=REMAINDER )
    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 319
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
def A__ ( self ) -> List[Any]:
"""simple docstring"""
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()
def A__ ( self , **__snake_case ) -> List[str]:
"""simple docstring"""
return TvltImageProcessor.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def A__ ( self , **__snake_case ) -> Optional[Any]:
"""simple docstring"""
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: Dict = self.get_image_processor()
UpperCAmelCase: Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase: Any = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase: Union[str, Any] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_ )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase: Optional[int] = self.get_image_processor()
UpperCAmelCase: Tuple = self.get_feature_extractor()
UpperCAmelCase: Union[str, Any] = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
UpperCAmelCase: List[str] = np.ones([1_2_0_0_0] )
UpperCAmelCase: Optional[Any] = feature_extractor(UpperCAmelCase_ , return_tensors="np" )
UpperCAmelCase: Optional[Any] = processor(audio=UpperCAmelCase_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase: int = self.get_image_processor()
UpperCAmelCase: Optional[int] = self.get_feature_extractor()
UpperCAmelCase: Union[str, Any] = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
UpperCAmelCase: Dict = np.ones([3, 2_2_4, 2_2_4] )
UpperCAmelCase: str = image_processor(UpperCAmelCase_ , return_tensors="np" )
UpperCAmelCase: Any = processor(images=UpperCAmelCase_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase: List[Any] = self.get_image_processor()
UpperCAmelCase: List[Any] = self.get_feature_extractor()
UpperCAmelCase: Tuple = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
UpperCAmelCase: int = np.ones([1_2_0_0_0] )
UpperCAmelCase: Optional[Any] = np.ones([3, 2_2_4, 2_2_4] )
UpperCAmelCase: str = processor(audio=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def A__ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase: Tuple = self.get_image_processor()
UpperCAmelCase: Dict = self.get_feature_extractor()
UpperCAmelCase: Dict = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 706
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 166
| 0
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("""in_layers.0""" ,"""norm1""" )
        new_item = new_item.replace("""in_layers.2""" ,"""conv1""" )
        new_item = new_item.replace("""out_layers.0""" ,"""norm2""" )
        new_item = new_item.replace("""out_layers.3""" ,"""conv2""" )
        new_item = new_item.replace("""emb_layers.1""" ,"""time_emb_proj""" )
        new_item = new_item.replace("""skip_connection""" ,"""conv_shortcut""" )
        new_item = shave_segments(new_item ,n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("""norm.weight""" ,"""group_norm.weight""" )
        new_item = new_item.replace("""norm.bias""" ,"""group_norm.bias""" )
        new_item = new_item.replace("""proj_out.weight""" ,"""proj_attn.weight""" )
        new_item = new_item.replace("""proj_out.bias""" ,"""proj_attn.bias""" )
        new_item = shave_segments(new_item ,n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Any=None ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Any=None ) -> List[Any]:
assert isinstance(_UpperCAmelCase ,_UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : Dict =old_checkpoint[path]
_a : int =old_tensor.shape[0] // 3
_a : Any =(-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : Union[str, Any] =old_tensor.shape[0] // config["""num_head_channels"""] // 3
_a : Dict =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a , _a , _a : Union[str, Any] =old_tensor.split(channels // num_heads ,dim=1 )
_a : Optional[int] =query.reshape(_UpperCAmelCase )
_a : Union[str, Any] =key.reshape(_UpperCAmelCase )
_a : Any =value.reshape(_UpperCAmelCase )
for path in paths:
_a : List[str] =path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : List[Any] =new_path.replace("""middle_block.0""" ,"""mid_block.resnets.0""" )
_a : str =new_path.replace("""middle_block.1""" ,"""mid_block.attentions.0""" )
_a : List[str] =new_path.replace("""middle_block.2""" ,"""mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : Optional[Any] =new_path.replace(replacement["""old"""] ,replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : Optional[int] =old_checkpoint[path["""old"""]][:, :, 0]
else:
_a : List[Any] =old_checkpoint[path["""old"""]]
def convert_ldm_checkpoint(checkpoint , config ):
_a : Dict ={}
_a : Optional[Any] =checkpoint["""time_embed.0.weight"""]
_a : Tuple =checkpoint["""time_embed.0.bias"""]
_a : List[str] =checkpoint["""time_embed.2.weight"""]
_a : int =checkpoint["""time_embed.2.bias"""]
_a : int =checkpoint["""input_blocks.0.0.weight"""]
_a : Union[str, Any] =checkpoint["""input_blocks.0.0.bias"""]
_a : Tuple =checkpoint["""out.0.weight"""]
_a : Dict =checkpoint["""out.0.bias"""]
_a : Union[str, Any] =checkpoint["""out.2.weight"""]
_a : Dict =checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_a : str =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_a : str ={
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(_UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
_a : Optional[Any] =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_a : Tuple ={
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(_UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
_a : List[str] =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_a : str ={
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(_UpperCAmelCase )
}
for i in range(1 ,_UpperCAmelCase ):
_a : Union[str, Any] =(i - 1) // (config["""num_res_blocks"""] + 1)
_a : List[str] =(i - 1) % (config["""num_res_blocks"""] + 1)
_a : List[Any] =[key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_a : Tuple =[key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_a : str =checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_a : Optional[int] =checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_a : int =renew_resnet_paths(_UpperCAmelCase )
_a : Union[str, Any] ={"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_a : str ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,additional_replacements=[meta_path, resnet_op] ,config=_UpperCAmelCase )
if len(_UpperCAmelCase ):
_a : Any =renew_attention_paths(_UpperCAmelCase )
_a : List[str] ={
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_a : str ={
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=_UpperCAmelCase ,config=_UpperCAmelCase ,)
_a : str =middle_blocks[0]
_a : Optional[Any] =middle_blocks[1]
_a : Tuple =middle_blocks[2]
_a : Union[str, Any] =renew_resnet_paths(_UpperCAmelCase )
assign_to_checkpoint(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,config=_UpperCAmelCase )
_a : List[str] =renew_resnet_paths(_UpperCAmelCase )
assign_to_checkpoint(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,config=_UpperCAmelCase )
_a : List[Any] =renew_attention_paths(_UpperCAmelCase )
_a : Optional[Any] ={
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,attention_paths_to_split=_UpperCAmelCase ,config=_UpperCAmelCase )
for i in range(_UpperCAmelCase ):
_a : int =i // (config["""num_res_blocks"""] + 1)
_a : str =i % (config["""num_res_blocks"""] + 1)
_a : List[Any] =[shave_segments(_UpperCAmelCase ,2 ) for name in output_blocks[i]]
_a : Optional[Any] ={}
for layer in output_block_layers:
_a , _a : Optional[Any] =layer.split(""".""" )[0], shave_segments(_UpperCAmelCase ,1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(_UpperCAmelCase )
else:
_a : int =[layer_name]
if len(_UpperCAmelCase ) > 1:
_a : Tuple =[key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_a : List[Any] =[key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_a : Optional[Any] =renew_resnet_paths(_UpperCAmelCase )
_a : str =renew_resnet_paths(_UpperCAmelCase )
_a : Any ={"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,additional_replacements=[meta_path] ,config=_UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : Any =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_a : Any =checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_a : Any =checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(_UpperCAmelCase ) == 2:
_a : str =[]
if len(_UpperCAmelCase ):
_a : List[str] =renew_attention_paths(_UpperCAmelCase )
_a : Union[str, Any] ={
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_a : Dict ={
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None ,config=_UpperCAmelCase ,)
else:
_a : Optional[Any] =renew_resnet_paths(_UpperCAmelCase ,n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : str =""".""".join(["""output_blocks""", str(_UpperCAmelCase ), path["""old"""]] )
_a : str =""".""".join(["""up_blocks""", str(_UpperCAmelCase ), """resnets""", str(_UpperCAmelCase ), path["""new"""]] )
_a : Optional[Any] =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 694
|
'''simple docstring'''
def set_bit(number: int , position: int ) -> int:
    return number | (1 << position)
def clear_bit(number: int , position: int ) -> int:
    return number & ~(1 << position)
def flip_bit(number: int , position: int ) -> int:
    return number ^ (1 << position)
def is_bit_set(number: int , position: int ) -> bool:
    return ((number >> position) & 1) == 1
def get_bit(number: int , position: int ) -> int:
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray , input_b: np.ndarray ) -> float:
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search(dataset: np.ndarray , value_array: np.ndarray ) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            '''Wrong input data\'s dimensions... '''
            f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                '''Wrong input data\'s shape... '''
                f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('''Wrong shape''' )
    if dataset.dtype != value_array.dtype:
        msg = (
            '''Input data have different datatype... '''
            f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity(input_a: np.ndarray , input_b: np.ndarray ) -> float:
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=DummyObject ):
    _backends = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self , *A__ , **A__ ) -> Union[str, Any]:
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def UpperCamelCase ( cls , *A__ , **A__ ) -> Optional[Any]:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def UpperCamelCase ( cls , *A__ , **A__ ) -> Any:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 44
| 1
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
@slow
@require_torch
def lowercase ( self : Tuple ) -> Any:
__lowerCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
__lowerCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
__lowerCAmelCase = bertabert.config.encoder.vocab_size
__lowerCAmelCase = tokenizer.sep_token_id
__lowerCAmelCase = tokenizer.cls_token_id
__lowerCAmelCase = 1_2_8
__lowerCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
__lowerCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
__lowerCAmelCase = train_dataset.select(range(3_2 ) )
__lowerCAmelCase = val_dataset.select(range(1_6 ) )
__lowerCAmelCase = 4
def _map_to_encoder_decoder_inputs(lowerCAmelCase_ : Optional[int] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCAmelCase = tokenizer(batch['article'] , padding='max_length' , truncation=lowerCAmelCase_ , max_length=5_1_2 )
__lowerCAmelCase = tokenizer(batch['highlights'] , padding='max_length' , truncation=lowerCAmelCase_ , max_length=1_2_8 )
__lowerCAmelCase = inputs.input_ids
__lowerCAmelCase = inputs.attention_mask
__lowerCAmelCase = outputs.input_ids
__lowerCAmelCase = outputs.input_ids.copy()
__lowerCAmelCase = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__lowerCAmelCase = outputs.attention_mask
assert all(len(lowerCAmelCase_ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(lowerCAmelCase_ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = pred.label_ids
__lowerCAmelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase_ ) )] ) / len(lowerCAmelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCAmelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
__lowerCAmelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = SeqaSeqTrainingArguments(
output_dir=lowerCAmelCase_ , per_device_train_batch_size=lowerCAmelCase_ , per_device_eval_batch_size=lowerCAmelCase_ , predict_with_generate=lowerCAmelCase_ , evaluation_strategy='steps' , do_train=lowerCAmelCase_ , do_eval=lowerCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCAmelCase = SeqaSeqTrainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase_ , eval_dataset=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , )
# start training
trainer.train()
| 53
|
"""simple docstring"""
class __A :
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : int ,_snake_case : str ,_snake_case : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ : Tuple = None
lowercase__ : str = None
lowercase__ : Dict = graph
self._normalize_graph(_snake_case ,_snake_case )
lowercase__ : Any = len(_snake_case )
lowercase__ : Any = None
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[str] ,_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
if sources is int:
lowercase__ : Optional[int] = [sources]
if sinks is int:
lowercase__ : str = [sinks]
if len(_snake_case ) == 0 or len(_snake_case ) == 0:
return
lowercase__ : str = sources[0]
lowercase__ : Optional[int] = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_snake_case ) > 1 or len(_snake_case ) > 1:
lowercase__ : Tuple = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase__ : Dict = len(self.graph ) + 1
for room in self.graph:
room.insert(0 ,0 )
self.graph.insert(0 ,[0] * size )
for i in sources:
lowercase__ : Optional[Any] = max_input_flow
lowercase__ : Dict = 0
lowercase__ : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase__ : List[str] = max_input_flow
lowercase__ : int = size - 1
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase ( self : str ,_snake_case : List[Any] ) -> int:
"""simple docstring"""
lowercase__ : Tuple = algorithm(self )
class __A :
'''simple docstring'''
def __init__( self : int ,_snake_case : Tuple ) -> int:
"""simple docstring"""
lowercase__ : int = flow_network
lowercase__ : int = flow_network.verticesCount
lowercase__ : Tuple = flow_network.sourceIndex
lowercase__ : str = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowercase__ : Optional[Any] = flow_network.graph
lowercase__ : Optional[int] = False
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase__ : Tuple = True
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
class __A ( A_ ):
'''simple docstring'''
def __init__( self : int ,_snake_case : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(_snake_case )
# use this to save your result
lowercase__ : Union[str, Any] = -1
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : int = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase__ : List[str] = [0] * self.verticies_count
lowercase__ : Tuple = [0] * self.verticies_count
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowercase__ : str = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase__ : Union[str, Any] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase__ : Tuple = 0
while i < len(_snake_case ):
lowercase__ : Dict = vertices_list[i]
lowercase__ : Optional[Any] = self.heights[vertex_index]
self.process_vertex(_snake_case )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 ,vertices_list.pop(_snake_case ) )
lowercase__ : Optional[int] = 0
else:
i += 1
lowercase__ : Dict = sum(self.preflow[self.source_index] )
def UpperCAmelCase ( self : Any ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_snake_case ,_snake_case )
self.relabel(_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : int ,_snake_case : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = min(
self.excesses[from_index] ,self.graph[from_index][to_index] - self.preflow[from_index][to_index] ,)
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : int = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase__ : Optional[int] = self.heights[to_index]
if min_height is not None:
lowercase__ : Optional[int] = min_height + 1
if __name__ == "__main__":
lowerCAmelCase_ = [0]
lowerCAmelCase_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowerCAmelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowerCAmelCase_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowerCAmelCase_ = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 560
| 0
|
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , _A : int , _A : Any=2 , _A : str=56 , _A : Any=True , _A : Dict=True , _A : Optional[Any]=True , _A : Any=True , _A : Tuple=99 , _A : Optional[int]=32 , _A : Tuple=2 , _A : List[str]=2 , _A : Any=7 , _A : int="gelu_new" , _A : Any=0.1 , _A : Any=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Any=2 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=4 , _A : Any="block_sparse" , _A : List[Any]=True , _A : Tuple=False , _A : str=2 , _A : List[str]=3 , ):
'''simple docstring'''
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : Any = seq_length
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Dict = use_attention_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Dict = use_labels
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Optional[Any] = num_attention_heads
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Dict = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = max_position_embeddings
UpperCAmelCase__ : int = type_vocab_size
UpperCAmelCase__ : int = type_sequence_label_size
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : Optional[int] = num_choices
UpperCAmelCase__ : str = rescale_embeddings
UpperCAmelCase__ : Optional[int] = attention_type
UpperCAmelCase__ : Union[str, Any] = use_bias
UpperCAmelCase__ : Dict = block_size
UpperCAmelCase__ : int = num_random_blocks
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_attention_mask:
UpperCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase_ ( __UpperCAmelCase , unittest.TestCase ):
lowerCAmelCase__ = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase_ ( self : Any ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase_ ( self : Dict ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase_ ( self : int ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(_A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
UpperCAmelCase__ : str = model_class(_A )
@jax.jit
def model_jitted(_A : int , _A : str=None , **_A : Tuple ):
return model(input_ids=_A , attention_mask=_A , **_A )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Dict = model_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Tuple = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : Dict , _A : Optional[Any] , _A : Any=1e-5 , _A : Optional[int]="outputs" , _A : Tuple=None ):
'''simple docstring'''
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(_A , _A , _A , _A , _A , _A )
| 717
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def lowercase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
# fmt: off
UpperCAmelCase__ : str = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCAmelCase__ : Dict = dict(zip(_A , range(len(_A ) ) ) )
UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
def lowercase_ ( self : List[str] , **_A : Dict ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_A )
def lowercase_ ( self : str , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = '''tester'''
UpperCAmelCase__ : Tuple = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase__ : str = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
UpperCAmelCase__ : int = tokenizer.encode([special_token] , add_special_tokens=_A )
self.assertEqual(len(_A ) , 1 )
UpperCAmelCase__ : Any = tokenizer.decode(_A , skip_special_tokens=_A )
self.assertTrue(special_token not in decoded )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.get_input_output_texts(_A )
UpperCAmelCase__ : Dict = tokenizer.tokenize(_A )
UpperCAmelCase__ : str = tokenizer.convert_tokens_to_ids(_A )
UpperCAmelCase__ : Tuple = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase__ : int = tokenizer.convert_ids_to_tokens(_A )
self.assertNotEqual(len(_A ) , 0 )
UpperCAmelCase__ : List[Any] = tokenizer.decode(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _A )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
| 312
| 0
|
"""simple docstring"""
__lowerCamelCase = 0 # The first color of the flag.
__lowerCamelCase = 1 # The second color of the flag.
__lowerCamelCase = 2 # The third color of the flag.
__lowerCamelCase = (red, white, blue)
def a ( __snake_case : list ):
'''simple docstring'''
if not sequence:
return []
if len(__snake_case ) == 1:
return list(__snake_case )
UpperCAmelCase_ :Optional[int] = 0
UpperCAmelCase_ :List[str] = len(__snake_case ) - 1
UpperCAmelCase_ :Optional[Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
UpperCAmelCase_ ,UpperCAmelCase_ :Optional[int] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
UpperCAmelCase_ ,UpperCAmelCase_ :List[Any] = sequence[high], sequence[mid]
high -= 1
else:
UpperCAmelCase_ :Optional[int] = f'The elements inside the sequence must contain only {colors} values'
raise ValueError(__snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = input("Enter numbers separated by commas:\n").strip()
__lowerCamelCase = [int(item.strip()) for item in user_input.split(",")]
print(f'''{dutch_national_flag_sort(unsorted)}''')
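# --- Illustrative addition: a compact, self-contained version of the three-way
# partition sketched above (the original identifiers were mangled, so this sketch
# uses its own hypothetical names and the literal colours 0, 1, 2).
def _three_way_partition(sequence: list) -> list:
    low, mid, high = 0, 0, len(sequence) - 1
    while mid <= high:
        if sequence[mid] == 0:  # first colour: move to the front
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == 1:  # second colour: already in place
            mid += 1
        else:  # third colour: move to the back, do not advance mid
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
    return sequence

assert _three_way_partition([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]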
| 608
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["GLPNFeatureExtractor"]
__lowerCamelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 608
| 1
|
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
UpperCAmelCase__ = 1_0
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
for i in range(lowercase ,lowercase ):
if array[i] == target:
return i
return -1
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = len(lowercase )
while left <= right:
if right - left < precision:
return lin_search(lowercase ,lowercase ,lowercase ,lowercase )
_UpperCAmelCase = (left + right) // 3 + 1
_UpperCAmelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
_UpperCAmelCase = one_third - 1
elif array[two_third] < target:
_UpperCAmelCase = two_third + 1
else:
_UpperCAmelCase = one_third + 1
_UpperCAmelCase = two_third - 1
else:
return -1
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
if left < right:
if right - left < precision:
return lin_search(lowercase ,lowercase ,lowercase ,lowercase )
_UpperCAmelCase = (left + right) // 3 + 1
_UpperCAmelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowercase ,one_third - 1 ,lowercase ,lowercase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 ,lowercase ,lowercase ,lowercase )
else:
return rec_ternary_search(one_third + 1 ,two_third - 1 ,lowercase ,lowercase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = input("""Enter numbers separated by comma:\n""").strip()
UpperCAmelCase__ = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
UpperCAmelCase__ = int(input("""Enter the number to be found in the list:\n""").strip())
UpperCAmelCase__ = ite_ternary_search(collection, target)
UpperCAmelCase__ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'''Iterative search: {target} found at positions: {resulta}''')
print(F'''Recursive search: {target} found at positions: {resulta}''')
else:
print("""Not found""")
| 719
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class a :
def __init__( self : Tuple , __lowerCAmelCase : str = None , __lowerCAmelCase : uuid.UUID = None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Tuple=None ):
if not conversation_id:
_UpperCAmelCase = uuid.uuida()
if past_user_inputs is None:
_UpperCAmelCase = []
if generated_responses is None:
_UpperCAmelCase = []
_UpperCAmelCase = conversation_id
_UpperCAmelCase = past_user_inputs
_UpperCAmelCase = generated_responses
_UpperCAmelCase = text
def __eq__( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
f'''with: "{text}".''' )
_UpperCAmelCase = text
else:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
_UpperCAmelCase = text
def lowerCAmelCase_ ( self : int ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_UpperCAmelCase = None
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ):
self.generated_responses.append(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Optional[int] ):
_UpperCAmelCase = f'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
_UpperCAmelCase = """user""" if is_user else """bot"""
output += f'''{name} >> {text} \n'''
return output
@add_end_docstrings(
lowerCAmelCase_ , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class a ( lowerCAmelCase_ ):
def __init__( self : List[Any] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if self.tokenizer.pad_token_id is None:
_UpperCAmelCase = self.tokenizer.eos_token
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str ):
_UpperCAmelCase = {}
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if min_length_for_response is not None:
_UpperCAmelCase = min_length_for_response
if minimum_tokens is not None:
_UpperCAmelCase = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCAmelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__lowerCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , __lowerCAmelCase : Union[Conversation, List[Conversation]] , __lowerCAmelCase : int=0 , **__lowerCAmelCase : List[str] ):
_UpperCAmelCase = super().__call__(__lowerCAmelCase , num_workers=__lowerCAmelCase , **__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Conversation , __lowerCAmelCase : Optional[int]=32 ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
_UpperCAmelCase = self.tokenizer._build_conversation_input_ids(__lowerCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCAmelCase = self._legacy_parse_and_tokenize(__lowerCAmelCase )
if self.framework == "pt":
_UpperCAmelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_UpperCAmelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=10 , **__lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
_UpperCAmelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
_UpperCAmelCase = max_length - minimum_tokens
_UpperCAmelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCAmelCase = model_inputs["""attention_mask"""][:, -trim:]
_UpperCAmelCase = model_inputs.pop("""conversation""" )
_UpperCAmelCase = max_length
_UpperCAmelCase = self.model.generate(**__lowerCAmelCase , **__lowerCAmelCase )
if self.model.config.is_encoder_decoder:
_UpperCAmelCase = 1
else:
_UpperCAmelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=True ):
_UpperCAmelCase = model_outputs["""output_ids"""]
_UpperCAmelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
_UpperCAmelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__lowerCAmelCase )
return conversation
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Conversation ):
_UpperCAmelCase = self.tokenizer.eos_token_id
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) )
if len(__lowerCAmelCase ) > self.tokenizer.model_max_length:
_UpperCAmelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
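# --- Illustrative usage sketch (hedged): assumes the `pipeline` factory and a
# conversational checkpoint such as "microsoft/DialoGPT-small" are available.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])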
| 275
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = ["image_processor", "tokenizer"]
UpperCAmelCase__ = "Pix2StructImageProcessor"
UpperCAmelCase__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : Optional[Any] , __snake_case : Any , __snake_case : Any ) -> str:
__magic_name__: Dict = False
super().__init__(__snake_case , __snake_case )
def __call__( self : int , __snake_case : List[str]=None , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : Optional[int] = 2_0_4_8 , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Dict , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
__magic_name__: Optional[int] = self.tokenizer
__magic_name__: Optional[int] = self.tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_token_type_ids=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__magic_name__: Optional[int] = self.image_processor(
__snake_case , return_tensors=__snake_case , max_patches=__snake_case , **__snake_case )
else:
# add pixel_values and bbox
__magic_name__: Optional[int] = self.image_processor(
__snake_case , return_tensors=__snake_case , max_patches=__snake_case , header_text=__snake_case , **__snake_case )
if text is not None and not self.image_processor.is_vqa:
__magic_name__: Optional[int] = self.tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_token_type_ids=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
if "attention_mask" in text_encoding:
__magic_name__: Optional[Any] = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
__magic_name__: Tuple = text_encoding.pop("""input_ids""" )
else:
__magic_name__: int = None
if text_encoding is not None:
encoding_image_processor.update(__snake_case )
return encoding_image_processor
def lowerCamelCase__ ( self : List[Any] , *__snake_case : Optional[Any] , **__snake_case : Tuple ) -> Optional[int]:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCamelCase__ ( self : Optional[Any] , *__snake_case : Tuple , **__snake_case : Any ) -> List[str]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
__magic_name__: Dict = self.tokenizer.model_input_names
__magic_name__: List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
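# --- Illustrative usage sketch (hedged): the checkpoint name and image URL are
# assumptions, and the sketch requires Pillow, requests and Hub access.
import requests
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # flattened patches plus an attention mask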
| 96
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCamelCase : Union[str, Any] = '''
Human: <<task>>
Assistant: '''
_lowerCamelCase : Optional[Any] = '''huggingface-tools/default-prompts'''
_lowerCamelCase : int = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple="run" ):
if prompt_or_repo_id is None:
SCREAMING_SNAKE_CASE = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , UpperCAmelCase__ ) is not None:
return prompt_or_repo_id
SCREAMING_SNAKE_CASE = cached_file(
UpperCAmelCase__ , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as f:
return f.read()
| 403
| 0
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_lowercase = logging.get_logger(__name__)
_lowercase = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
_lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> Tuple:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
A__ = model_type_to_module_name(UpperCamelCase_ )
A__ = importlib.import_module(f".{module_name}" , '''transformers.models''' )
try:
return getattr(UpperCamelCase_ , UpperCamelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(UpperCamelCase_ , '''__name__''' , UpperCamelCase_ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
A__ = importlib.import_module('''transformers''' )
if hasattr(UpperCamelCase_ , UpperCamelCase_ ):
return getattr(UpperCamelCase_ , UpperCamelCase_ )
return None
def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, os.PathLike] , UpperCamelCase_ : Optional[Union[str, os.PathLike]] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[Dict[str, str]] = None , UpperCamelCase_ : Optional[Union[bool, str]] = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : bool = False , **UpperCamelCase_ : Optional[Any] , )-> List[Any]:
A__ = get_file_from_repo(
UpperCamelCase_ , UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , resume_download=UpperCamelCase_ , proxies=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as reader:
return json.load(UpperCamelCase_ )
class _UpperCAmelCase :
def __init__( self):
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''')
@classmethod
@replace_list_option_in_docstrings(a__)
def snake_case_ ( cls , a__ , **a__):
A__ = kwargs.pop('''config''' , a__)
A__ = kwargs.pop('''trust_remote_code''' , a__)
A__ = True
A__ , A__ = ImageProcessingMixin.get_image_processor_dict(a__ , **a__)
A__ = config_dict.get('''image_processor_type''' , a__)
A__ = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {}):
A__ = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
A__ = config_dict.pop('''feature_extractor_type''' , a__)
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''')
A__ = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''')
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {}):
A__ = config_dict['''auto_map''']['''AutoFeatureExtractor''']
A__ = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''')
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''')
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(a__ , a__):
A__ = AutoConfig.from_pretrained(a__ , **a__)
# It could be in `config.image_processor_type`
A__ = getattr(a__ , '''image_processor_type''' , a__)
if hasattr(a__ , '''auto_map''') and "AutoImageProcessor" in config.auto_map:
A__ = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
A__ = image_processor_class_from_name(a__)
A__ = image_processor_auto_map is not None
A__ = image_processor_class is not None or type(a__) in IMAGE_PROCESSOR_MAPPING
A__ = resolve_trust_remote_code(
a__ , a__ , a__ , a__)
if has_remote_code and trust_remote_code:
A__ = get_class_from_dynamic_module(
a__ , a__ , **a__)
A__ = kwargs.pop('''code_revision''' , a__)
if os.path.isdir(a__):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(a__ , **a__)
elif image_processor_class is not None:
return image_processor_class.from_dict(a__ , **a__)
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(a__) in IMAGE_PROCESSOR_MAPPING:
A__ = IMAGE_PROCESSOR_MAPPING[type(a__)]
return image_processor_class.from_dict(a__ , **a__)
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")
@staticmethod
def snake_case_ ( a__ , a__):
IMAGE_PROCESSOR_MAPPING.register(a__ , a__)
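# --- Illustrative usage sketch (hedged): the checkpoint name is an assumption
# and Hub access is required.
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(image_processor).__name__)  # resolved through the mapping defined above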
| 526
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase = 16
_lowercase = 32
def lowerCAmelCase__ ( UpperCamelCase_ : Accelerator , UpperCamelCase_ : int = 1_6 )-> List[str]:
A__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCamelCase_ : Any ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCamelCase_ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 1_6
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
UpperCamelCase_ , padding='''longest''' , max_length=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
A__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase = mocked_dataloaders # noqa: F811
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple )-> Union[str, Any]:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , UpperCamelCase_ ) == "1":
A__ = 2
# Initialize accelerator
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['''lr''']
A__ = int(config['''num_epochs'''] )
A__ = int(config['''seed'''] )
A__ = int(config['''batch_size'''] )
A__ = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=UpperCamelCase_ )
def inner_training_loop(UpperCamelCase_ : Optional[int] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(UpperCamelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCamelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=UpperCamelCase_ )
A__ , A__ = get_dataloaders(UpperCamelCase_ , UpperCamelCase_ )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase_ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCamelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ ):
model.train()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**UpperCamelCase_ )
A__ = outputs.loss
accelerator.backward(UpperCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**UpperCamelCase_ )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCamelCase_ , references=UpperCamelCase_ , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , UpperCamelCase_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowerCAmelCase__ ( )-> Optional[Any]:
A__ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=UpperCamelCase_ , default=UpperCamelCase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ = parser.parse_args()
A__ = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
main()
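# --- Illustrative addition: the decorator used above, shown in isolation (assumes
# `accelerate` is installed). It retries the wrapped function with a smaller batch
# size whenever a CUDA out-of-memory error is raised.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def _demo_training_loop(batch_size):
    # build dataloaders and run the training loop with `batch_size` here
    print(f"trying batch_size={batch_size}")

_demo_training_loop()  # called with no arguments; the decorator injects batch_size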
| 526
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
UpperCamelCase__: List[Any] = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
UpperCamelCase__: List[str] = model(__lowerCamelCase )["last_hidden_state"]
UpperCamelCase__: Optional[Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice.
UpperCamelCase__: List[Any] = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
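# --- Illustrative usage sketch outside the test harness (hedged): assumes
# TensorFlow, sentencepiece and Hub access; checkpoint names as in the test above.
from transformers import CamembertTokenizer, TFCamembertModel

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
outputs = model(**tokenizer("J'aime le camembert !", return_tensors="tf"))
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)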
| 380
|
import logging
from transformers import PretrainedConfig
A__: Dict = logging.getLogger(__name__)
A__: List[Any] = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """bertabs"""
def __init__( self: Any , __lowerCamelCase: Optional[int]=3_0522 , __lowerCamelCase: List[Any]=512 , __lowerCamelCase: Optional[Any]=6 , __lowerCamelCase: Optional[Any]=512 , __lowerCamelCase: List[str]=8 , __lowerCamelCase: List[Any]=512 , __lowerCamelCase: Optional[int]=0.2 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: Optional[Any]=768 , __lowerCamelCase: int=8 , __lowerCamelCase: Union[str, Any]=2048 , __lowerCamelCase: Dict=0.2 , **__lowerCamelCase: Dict , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
UpperCamelCase__: str = vocab_size
UpperCamelCase__: Tuple = max_pos
UpperCamelCase__: Union[str, Any] = enc_layers
UpperCamelCase__: Optional[Any] = enc_hidden_size
UpperCamelCase__: Optional[Any] = enc_heads
UpperCamelCase__: Dict = enc_ff_size
UpperCamelCase__: Tuple = enc_dropout
UpperCamelCase__: Tuple = dec_layers
UpperCamelCase__: int = dec_hidden_size
UpperCamelCase__: int = dec_heads
UpperCamelCase__: str = dec_ff_size
UpperCamelCase__: Union[str, Any] = dec_dropout
| 380
| 1
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
SCREAMING_SNAKE_CASE_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class a ( nn.Module ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[str] = torchvision.models.resnetaaa(pretrained=A__ )
_UpperCAmelCase : Optional[Any] = list(model.children() )[:-2]
_UpperCAmelCase : Any = nn.Sequential(*A__ )
_UpperCAmelCase : Dict = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.pool(self.model(A__ ) )
_UpperCAmelCase : Any = torch.flatten(A__ , start_dim=2 )
_UpperCAmelCase : Tuple = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class a ( __a ):
def __init__( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = [json.loads(A__ ) for l in open(A__ )]
_UpperCAmelCase : Tuple = os.path.dirname(A__ )
_UpperCAmelCase : Optional[Any] = tokenizer
_UpperCAmelCase : Optional[int] = labels
_UpperCAmelCase : Union[str, Any] = len(A__ )
_UpperCAmelCase : Union[str, Any] = max_seq_length
_UpperCAmelCase : Optional[int] = transforms
def __len__( self ):
'''simple docstring'''
return len(self.data )
def __getitem__( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Dict = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=A__ ) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1]
_UpperCAmelCase : Optional[int] = sentence[: self.max_seq_length]
_UpperCAmelCase : int = torch.zeros(self.n_classes )
_UpperCAmelCase : int = 1
_UpperCAmelCase : Optional[int] = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
_UpperCAmelCase : int = self.transforms(A__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] ) -> str:
_UpperCAmelCase : Any = [len(row["sentence"] ) for row in batch]
_UpperCAmelCase , _UpperCAmelCase : Any = len(lowerCAmelCase ), max(lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = torch.zeros(lowerCAmelCase , lowerCAmelCase , dtype=torch.long )
_UpperCAmelCase : str = torch.zeros(lowerCAmelCase , lowerCAmelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(lowerCAmelCase , lowerCAmelCase ) ):
_UpperCAmelCase : Tuple = input_row["sentence"]
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : Any = torch.stack([row["image"] for row in batch] )
_UpperCAmelCase : List[Any] = torch.stack([row["label"] for row in batch] )
_UpperCAmelCase : Optional[int] = torch.stack([row["image_start_token"] for row in batch] )
_UpperCAmelCase : List[Any] = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def __SCREAMING_SNAKE_CASE ( ) -> str:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def __SCREAMING_SNAKE_CASE ( ) -> Any:
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 700
|
# Note: if you intend to run this script, make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly.
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
SCREAMING_SNAKE_CASE_ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
SCREAMING_SNAKE_CASE_ = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
SCREAMING_SNAKE_CASE_ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
SCREAMING_SNAKE_CASE_ = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
SCREAMING_SNAKE_CASE_ = 'allenai'
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> List[Any]:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
_UpperCAmelCase : Optional[int] = dict((re.sub(R"@@$" , "" , lowerCAmelCase ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , lowerCAmelCase ), v) for k, v in d.items() )
_UpperCAmelCase : Optional[int] = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
_UpperCAmelCase : Any = d[k] # restore
return da
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] ) -> int:
# prep
assert os.path.exists(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
_UpperCAmelCase : Optional[Any] = basename(lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = dirname(lowerCAmelCase )
_UpperCAmelCase : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_UpperCAmelCase : Optional[int] = cls.hub_models()
_UpperCAmelCase : Dict = {"bpe": "fastbpe", "tokenizer": "moses"}
_UpperCAmelCase : Tuple = "."
# note: the model dump is old; fairseq has since upgraded its model format and
# does a whole lot of rewrites and splits on the saved weights, therefore we
# can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
_UpperCAmelCase : int = hub_utils.from_pretrained(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , archive_map=lowerCAmelCase , **lowerCAmelCase )
_UpperCAmelCase : Any = vars(chkpt["args"]["model"] )
_UpperCAmelCase : Optional[int] = args["source_lang"]
_UpperCAmelCase : Dict = args["target_lang"]
_UpperCAmelCase : int = dirname(lowerCAmelCase )
_UpperCAmelCase : str = basename(lowerCAmelCase )
# dicts
_UpperCAmelCase : List[str] = os.path.join(lowerCAmelCase , F'dict.{src_lang}.txt' )
_UpperCAmelCase : Dict = os.path.join(lowerCAmelCase , F'dict.{tgt_lang}.txt' )
_UpperCAmelCase : Union[str, Any] = Dictionary.load(lowerCAmelCase )
_UpperCAmelCase : Any = rewrite_dict_keys(src_dict.indices )
_UpperCAmelCase : int = len(lowerCAmelCase )
_UpperCAmelCase : List[Any] = os.path.join(lowerCAmelCase , "vocab-src.json" )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase , ensure_ascii=lowerCAmelCase , indent=lowerCAmelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_UpperCAmelCase : Union[str, Any] = True
for k in src_vocab.keys():
if not k.islower():
_UpperCAmelCase : Optional[int] = False
break
_UpperCAmelCase : Dict = Dictionary.load(lowerCAmelCase )
_UpperCAmelCase : List[str] = rewrite_dict_keys(tgt_dict.indices )
_UpperCAmelCase : Union[str, Any] = len(lowerCAmelCase )
_UpperCAmelCase : Dict = os.path.join(lowerCAmelCase , "vocab-tgt.json" )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase , ensure_ascii=lowerCAmelCase , indent=lowerCAmelCase ) )
# merges_file (bpecodes)
_UpperCAmelCase : int = os.path.join(lowerCAmelCase , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_UpperCAmelCase : Tuple = os.path.join(lowerCAmelCase , lowerCAmelCase )
if os.path.exists(lowerCAmelCase ):
break
with open(lowerCAmelCase , encoding="utf-8" ) as fin:
_UpperCAmelCase : List[str] = fin.read()
_UpperCAmelCase : str = re.sub(R" \d+$" , "" , lowerCAmelCase , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as fout:
fout.write(lowerCAmelCase )
# model config
_UpperCAmelCase : int = os.path.join(lowerCAmelCase , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
_UpperCAmelCase : Union[str, Any] = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
_UpperCAmelCase : Union[str, Any] = 5
_UpperCAmelCase : str = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_UpperCAmelCase : str = best_score_hparams[model_dir]["length_penalty"]
else:
_UpperCAmelCase : str = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase , ensure_ascii=lowerCAmelCase , indent=lowerCAmelCase ) )
# tokenizer config
_UpperCAmelCase : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
_UpperCAmelCase : str = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1024,
"do_lower_case": do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase , ensure_ascii=lowerCAmelCase , indent=lowerCAmelCase ) )
# model
_UpperCAmelCase : Optional[int] = chkpt["models"][0]
_UpperCAmelCase : int = model.state_dict()
# rename keys to start with 'model.'
_UpperCAmelCase : Union[str, Any] = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
_UpperCAmelCase : Any = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase , lowerCAmelCase )
_UpperCAmelCase : List[Any] = FSMTConfig.from_pretrained(lowerCAmelCase )
_UpperCAmelCase : List[Any] = FSMTForConditionalGeneration(lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
# save
_UpperCAmelCase : List[str] = os.path.join(lowerCAmelCase , lowerCAmelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCAmelCase , lowerCAmelCase )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
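# Hypothetical invocation (the script file name and checkpoint layout below are assumptions, not
# taken from this file): given a fairseq WMT19 dump containing model4.pt, dict.{src,tgt}.txt and
# bpecodes, the conversion would be run roughly as:
#   python convert_fsmt_checkpoint.py \
#       --fsmt_checkpoint_path ./wmt19-en-de/model4.pt \
#       --pytorch_dump_folder_path ./data/wmt19-en-de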
| 467
| 0
|
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class UpperCamelCase__ ( _a ):
"""simple docstring"""
__magic_name__ = ["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = 8 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = do_rescale
_lowerCAmelCase : List[Any] = rescale_factor
_lowerCAmelCase : Optional[Any] = do_pad
_lowerCAmelCase : str = pad_size
def a ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ ):
'''simple docstring'''
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def a ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = get_image_size(__lowerCamelCase )
_lowerCAmelCase : str = (old_height // size + 1) * size - old_height
_lowerCAmelCase : Tuple = (old_width // size + 1) * size - old_width
return pad(__lowerCamelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__lowerCamelCase )
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Tuple = do_pad if do_pad is not None else self.do_pad
_lowerCAmelCase : Optional[int] = pad_size if pad_size is not None else self.pad_size
_lowerCAmelCase : Tuple = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase : Dict = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_rescale:
_lowerCAmelCase : str = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_pad:
_lowerCAmelCase : Dict = [self.pad(__lowerCamelCase , size=__lowerCamelCase ) for image in images]
_lowerCAmelCase : List[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
_lowerCAmelCase : int = {'pixel_values': images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
| 444
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 615
| 0
|
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve: lazily yields the primes 2, 3, 5, ... one at a time."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its recorded factor forward to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` has no recorded factor, so it is prime; mark its square.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which the remainder 2 * p_n * n first exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
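# Why `2 * prime * n > limit` is the right test (reasoning added for clarity, not in the original
# file): expanding (p - 1)**n + (p + 1)**n with the binomial theorem, every term containing p**2
# or a higher power of p vanishes modulo p**2.  For even n the surviving constant terms sum to 2,
# while for odd n they cancel and the linear terms give a remainder of 2 * n * p (mod p**2).  The
# remainder is therefore only interesting for odd n, which is why the loop advances n by 2 and
# throws away every other prime with the extra next(primes) call.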
| 703
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 535
| 0
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a__ : Dict = HfApi()
a__ : List[str] = {}
# fmt: off
a__ : Dict = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
a__ : Dict = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
a__ : str = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
a__ : Dict = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
a__ : Any = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
a__ : Optional[int] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
a__ : Optional[int] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
a__ : str = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
a__ : Union[str, Any] = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
a__ : int = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
a__ : List[str] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
a__ : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
a__ : Any = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
a__ : List[Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
a__ : List[Any] = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
a__ : Optional[Any] = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a__ : Tuple = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
a__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
a__ : List[Any] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a__ : Tuple = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a__ : int = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a__ : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 51
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 141
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
| 50
| 1
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
_lowerCAmelCase = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts the text nodes of an HTML string together with the xpath of each node."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str` or `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
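# Minimal usage sketch (hypothetical HTML input, assuming bs4 is installed):
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   encoding["nodes"]   -> [["Hello world"]]
#   encoding["xpaths"]  -> [["/html/body/p"]]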
| 10
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
"""simple docstring"""
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__UpperCAmelCase : List[str] = TapasForQuestionAnswering(config=UpperCamelCase )
elif task == "WTQ":
# run_task_main.py hparams
__UpperCAmelCase : Tuple = 4
__UpperCAmelCase : Any = True
# hparam_utils.py hparams
__UpperCAmelCase : Union[str, Any] = 0.664694
__UpperCAmelCase : Union[str, Any] = 0.207951
__UpperCAmelCase : int = 0.121194
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : List[str] = 0.0352513
__UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=UpperCamelCase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__UpperCAmelCase : int = 4
__UpperCAmelCase : Optional[int] = False
# hparam_utils.py hparams
__UpperCAmelCase : int = 36.4519
__UpperCAmelCase : str = 0.903421
__UpperCAmelCase : Dict = 222.088
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Any = 0.763141
__UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=UpperCamelCase )
elif task == "TABFACT":
__UpperCAmelCase : Union[str, Any] = TapasForSequenceClassification(config=UpperCamelCase )
elif task == "MLM":
__UpperCAmelCase : Tuple = TapasForMaskedLM(config=UpperCamelCase )
elif task == "INTERMEDIATE_PRETRAINING":
__UpperCAmelCase : List[str] = TapasModel(config=UpperCamelCase )
else:
raise ValueError(f"Task {task} not supported." )
print(f"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Save pytorch-model (weights and configuration)
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(UpperCamelCase )
# Save tokenizer files
print(f"Save tokenizer files to {pytorch_dump_path}" )
__UpperCAmelCase : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(UpperCamelCase )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
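# Hypothetical invocation (the script file name and paths are assumptions; the flags match the
# parser defined above).  Note that the code derives the vocab file as tf_checkpoint_path[:-10] +
# "vocab.txt", so the checkpoint path is expected to end in "model.ckpt":
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#       --tapas_config_file ./tapas_wtq/bert_config.json \
#       --pytorch_dump_path ./tapas-wtq-converted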
| 77
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class UpperCAmelCase__ :
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "The column name of the images in the files."} )
_SCREAMING_SNAKE_CASE : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the training data."} )
_SCREAMING_SNAKE_CASE : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the validation data."} )
_SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1_5 , metadata={"help": "Percent to split off of train for validation."} )
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowerCAmelCase__ ( self ):
a ={}
if self.train_dir is not None:
a =self.train_dir
if self.validation_dir is not None:
a =self.validation_dir
a =data_files if data_files else None
@dataclass
class UpperCAmelCase__ :
'''simple docstring'''
_SCREAMING_SNAKE_CASE : str = field(
default=UpperCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
_SCREAMING_SNAKE_CASE : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_SCREAMING_SNAKE_CASE : str = field(default=UpperCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} )
_SCREAMING_SNAKE_CASE : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
_SCREAMING_SNAKE_CASE : float = field(
default=0.7_5 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
_SCREAMING_SNAKE_CASE : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class UpperCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : float = field(
default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def lowerCamelCase ( UpperCAmelCase_ : Any )-> List[Any]:
"""simple docstring"""
a =torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCamelCase ( )-> Any:
"""simple docstring"""
a =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , UpperCAmelCase_ , UpperCAmelCase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a =training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase_ )
transformers.utils.logging.set_verbosity(UpperCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
a =None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCAmelCase_ ) and data_args.train_val_split > 0.0:
a =ds["""train"""].train_test_split(data_args.train_val_split )
a =split["""train"""]
a =split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a ={
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
a =ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCAmelCase_ )
elif model_args.model_name_or_path:
a =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase_ )
else:
a =ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
a =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase_ )
elif model_args.model_name_or_path:
a =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase_ )
else:
a =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
a =ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
a =ViTMAEForPreTraining(UpperCAmelCase_ )
if training_args.do_train:
a =ds["""train"""].column_names
else:
a =ds["""validation"""].column_names
if data_args.image_column_name is not None:
a =data_args.image_column_name
elif "image" in column_names:
a ="""image"""
elif "img" in column_names:
a ="""img"""
else:
a =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
a =image_processor.size["""shortest_edge"""]
else:
a =(image_processor.size["""height"""], image_processor.size["""width"""])
a =Compose(
[
Lambda(lambda UpperCAmelCase_ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCAmelCase_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(UpperCAmelCase_ : Union[str, Any] ):
a =[transforms(UpperCAmelCase_ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
a =ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCAmelCase_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
a =(
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCAmelCase_ )
# Compute absolute learning rate
a =(
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
a =training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
a =Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , )
# Training
if training_args.do_train:
a =None
if training_args.resume_from_checkpoint is not None:
a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a =last_checkpoint
a =trainer.train(resume_from_checkpoint=UpperCAmelCase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
a =trainer.evaluate()
trainer.log_metrics("""eval""" , UpperCAmelCase_ )
trainer.save_metrics("""eval""" , UpperCAmelCase_ )
# Write model card and (optionally) push to hub
a ={
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase_ )
else:
trainer.create_model_card(**UpperCAmelCase_ )
def lowerCamelCase ( UpperCAmelCase_ : List[str] )-> int:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
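# A hypothetical launch command (the script name run_mae is taken from the telemetry call above;
# flag names follow the dataclass fields, with --output_dir/--do_train/--do_eval coming from the
# inherited TrainingArguments):
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75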
| 321
|
import argparse
import copy
def lowerCamelCase ( UpperCAmelCase_ : str )-> str:
"""simple docstring"""
a ={}
with open(UpperCAmelCase_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
a =[]
_list.append([line.split()[1], line.split()[2]] )
a =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
a =[]
_list.append([line.split()[0], line.split()[2]] )
a =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
with open(UpperCAmelCase_ ) as f:
a =f.read(1 )
a =start_node
a =[]
a =start_node
a =0
while visiting not in first_solution:
a =1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(UpperCAmelCase_ ) and k[0] not in first_solution:
a =k[1]
a =k[0]
first_solution.append(UpperCAmelCase_ )
a =distance_of_first_solution + int(UpperCAmelCase_ )
a =best_node
first_solution.append(UpperCAmelCase_ )
a =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
a =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any )-> Any:
"""simple docstring"""
a =[]
for n in solution[1:-1]:
a =solution.index(UpperCAmelCase_ )
for kn in solution[1:-1]:
a =solution.index(UpperCAmelCase_ )
if n == kn:
continue
a =copy.deepcopy(UpperCAmelCase_ )
a =kn
a =n
a =0
for k in _tmp[:-1]:
a =_tmp[_tmp.index(UpperCAmelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
a =distance + int(i[1] )
_tmp.append(UpperCAmelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
a =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda UpperCAmelCase_ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str )-> int:
"""simple docstring"""
a =1
a =first_solution
a =[]
a =distance_of_first_solution
a =solution
while count <= iters:
a =find_neighborhood(UpperCAmelCase_ , UpperCAmelCase_ )
a =0
a =neighborhood[index_of_best_solution]
a =len(UpperCAmelCase_ ) - 1
a =False
while not found:
a =0
while i < len(UpperCAmelCase_ ):
if best_solution[i] != solution[i]:
a =best_solution[i]
a =solution[i]
break
a =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
a =True
a =best_solution[:-1]
a =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
a =cost
a =solution
else:
a =index_of_best_solution + 1
a =neighborhood[index_of_best_solution]
if len(UpperCAmelCase_ ) >= size:
tabu_list.pop(0 )
a =count + 1
return best_solution_ever, best_cost
def lowerCamelCase ( UpperCAmelCase_ : Optional[Any]=None )-> List[str]:
"""simple docstring"""
a =generate_neighbours(args.File )
a , a =generate_first_solution(
args.File , UpperCAmelCase_ )
a , a =tabu_search(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
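# Input format note (example values are hypothetical): the neighbour parser above reads one edge
# per line as "<node_a> <node_b> <distance>", e.g.
#   a b 20
#   a c 18
#   b c 10
# and a run would look like: python tabu_search.py -f edges.txt -i 4 -s 3
# (the script file name is an assumption; -f/-i/-s are the flags defined above).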
| 321
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum subarray: returns (start index, end index, sum)."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint: expand left from mid and right from mid + 1."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ = 0.0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0.0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
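# Quick hand-worked check (added for illustration): for arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4],
# max_subarray(arr, 0, len(arr) - 1) returns (3, 6, 6), i.e. the slice arr[3:7] == [4, -1, 2, 1]
# with sum 6.  The divide-and-conquer recursion does O(n) work per level in max_cross_sum over
# O(log n) levels, so the whole search runs in O(n log n) time.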
| 133
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the values in the two (unsorted) arrays taken together."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
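# Hand-worked examples (added for illustration, not in the original file):
#   median_of_two_arrays([1, 3], [2])    -> 2    (merged and sorted: [1, 2, 3], odd length)
#   median_of_two_arrays([1, 2], [3, 4]) -> 2.5  (merged: [1, 2, 3, 4], the two middle values are averaged)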
| 420
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the area between fnc and the x axis on [x_start, x_end] with `steps` trapezoids."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
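# Note (added for context, not in the original file): the composite trapezoidal rule converges at
# roughly O(1 / steps**2) for smooth integrands, which is why each tenfold increase of `steps` in
# the loop above improves the estimate about a hundredfold.  Because each trapezoid contributes
# abs(fx2 + fx1) * (x2 - x1) / 2, the function approximates the unsigned area between the curve
# and the x axis, as printed above, rather than the signed integral.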
| 710
|
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    """Iterative digit sum of |n|."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Digit sum of |n| via recursion into the iterative helper."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on a few large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 275
| 0
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCamelCase (unittest.TestCase ):
def __snake_case ( self :Tuple ) ->Tuple:
lowercase : str = """hf-internal-testing/tiny-random-t5"""
lowercase : List[Any] = AutoTokenizer.from_pretrained(__magic_name__ )
lowercase : List[str] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ )
lowercase : int = tokenizer("""This is me""" , return_tensors="""pt""" )
lowercase : Optional[int] = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase : Any = model.generate(**__magic_name__ )
lowercase : Optional[int] = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
lowercase : Dict = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase : List[Any] = model_reloaded.generate(**__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ ) )
def __snake_case ( self :int ) ->Optional[Any]:
lowercase : List[str] = """hf-internal-testing/tiny-random-t5"""
lowercase : str = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ )
lowercase : Union[str, Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__magic_name__ ):
model.save_pretrained(__magic_name__ )
lowercase : List[str] = model.reverse_bettertransformer()
model.save_pretrained(__magic_name__ )
| 264
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip the current element; Branch 2: include it, then backtrack.
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
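# Added note: for a sequence of length n the recursion above prints all 2**n subsequences
# (including the empty list []), so the [3, 1, 2, 4] call prints 16 lists and the
# ["A", "B", "C"] call prints 8.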
| 385
| 0
|
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _snake_case ( a__ ):
lowerCAmelCase :str = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def snake_case__ ( self):
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , _lowerCamelCase , )
@cached_property
def snake_case__ ( self):
logger.info("""PyTorch: setting up devices""")
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""")
if self.no_cuda:
UpperCAmelCase__ : List[Any] = torch.device("""cpu""")
UpperCAmelCase__ : Optional[int] = 0
elif is_sagemaker_model_parallel_available():
UpperCAmelCase__ : str = smp.local_rank()
UpperCAmelCase__ : Any = torch.device("""cuda""" , _lowerCamelCase)
UpperCAmelCase__ : Dict = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta)
UpperCAmelCase__ : Union[str, Any] = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK"""))
UpperCAmelCase__ : int = torch.device("""cuda""" , self.local_rank)
UpperCAmelCase__ : Dict = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCAmelCase__ : int = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""")
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCAmelCase__ : int = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta)
UpperCAmelCase__ : Dict = torch.device("""cuda""" , self.local_rank)
UpperCAmelCase__ : Tuple = 1
if device.type == "cuda":
torch.cuda.set_device(_lowerCamelCase)
return device
@property
def snake_case__ ( self):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def snake_case__ ( self):
return not is_sagemaker_model_parallel_available()
@property
def snake_case__ ( self):
return False
| 113
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class _snake_case ( a__ ):
lowerCAmelCase :Any = '''xlm'''
lowerCAmelCase :Any = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self , _lowerCamelCase=3_0145 , _lowerCamelCase=2048 , _lowerCamelCase=12 , _lowerCamelCase=16 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=True , _lowerCamelCase=512 , _lowerCamelCase=2048**-0.5 , _lowerCamelCase=1e-1_2 , _lowerCamelCase=0.02 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=5 , _lowerCamelCase=True , _lowerCamelCase="first" , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=0.1 , _lowerCamelCase=5 , _lowerCamelCase=5 , _lowerCamelCase=0 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase=0 , **_lowerCamelCase , ):
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Tuple = emb_dim
UpperCAmelCase__ : Optional[Any] = n_layers
UpperCAmelCase__ : List[str] = n_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Optional[int] = attention_dropout
UpperCAmelCase__ : Tuple = gelu_activation
UpperCAmelCase__ : Optional[Any] = sinusoidal_embeddings
UpperCAmelCase__ : int = causal
UpperCAmelCase__ : Union[str, Any] = asm
UpperCAmelCase__ : Optional[Any] = n_langs
UpperCAmelCase__ : List[Any] = use_lang_emb
UpperCAmelCase__ : Union[str, Any] = layer_norm_eps
UpperCAmelCase__ : List[str] = bos_index
UpperCAmelCase__ : List[Any] = eos_index
UpperCAmelCase__ : int = pad_index
UpperCAmelCase__ : str = unk_index
UpperCAmelCase__ : Dict = mask_index
UpperCAmelCase__ : str = is_encoder
UpperCAmelCase__ : Dict = max_position_embeddings
UpperCAmelCase__ : Any = embed_init_std
UpperCAmelCase__ : List[Any] = init_std
UpperCAmelCase__ : List[str] = summary_type
UpperCAmelCase__ : Union[str, Any] = summary_use_proj
UpperCAmelCase__ : Any = summary_activation
UpperCAmelCase__ : List[str] = summary_proj_to_labels
UpperCAmelCase__ : Union[str, Any] = summary_first_dropout
UpperCAmelCase__ : str = start_n_top
UpperCAmelCase__ : str = end_n_top
UpperCAmelCase__ : Tuple = mask_token_id
UpperCAmelCase__ : Union[str, Any] = lang_id
if "n_words" in kwargs:
UpperCAmelCase__ : List[str] = kwargs["""n_words"""]
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , **_lowerCamelCase)
class _snake_case ( a__ ):
@property
def snake_case__ ( self):
if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
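# For the default (non multiple-choice) task, the property above resolves to:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"}),
#                ("token_type_ids", {0: "batch", 1: "sequence"})])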
| 113
| 1
|
'''simple docstring'''
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : int = size
A_ : Dict = [0] * size
A_ : Any = [0] * size
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return index | (index + 1)
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return (index & (index + 1)) - 1
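    # Bit-trick intuition for the two static helpers above (illustrative): for index = 5 (0b101),
    # index | (index + 1) = 5 | 6 = 7, and (index & (index + 1)) - 1 = (5 & 6) - 1 = 3.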
def _a ( self : List[Any] ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = value
while index < self.size:
A_ : Optional[int] = self.get_prev(_a ) + 1
if current_left_border == index:
A_ : Dict = value
else:
A_ : Tuple = max(_a ,_a ,_a )
A_ : List[str] = self.get_next(_a )
def _a ( self : List[str] ,_a : int ,_a : int ):
'''simple docstring'''
        right -= 1  # Because `right` is exclusive
A_ : List[Any] = 0
while left <= right:
A_ : List[str] = self.get_prev(_a )
if left <= current_left:
A_ : int = max(_a ,self.tree[right] )
A_ : int = current_left
else:
A_ : str = max(_a ,self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665
|
'''simple docstring'''
def lowerCamelCase ( a : str , b : str):
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
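# Doctest-style sketch of the intended behaviour (classic "abbreviation" inputs; illustrative):
#   >>> lowerCamelCase("daBcd", "ABC")
#   True      # capitalize 'a' and 'c', drop the remaining lowercase letters
#   >>> lowerCamelCase("dBcd", "ABC")
#   False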
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665
| 1
|
from __future__ import annotations
def snake_case_ ( electron_conc , hole_conc , intrinsic_conc , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
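# A minimal usage sketch (illustrative values), assuming the helper above applies the
# mass-action law n * p = n_i**2 to solve for the missing quantity:
if __name__ == "__main__":
    # n = 25, p = 100  ->  n_i = sqrt(25 * 100) = 50
    print(snake_case_(25, 100, 0))  # ('intrinsic_conc', 50.0)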
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
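# Illustrative effect of the lazy module above: the tokenizer classes are only imported on
# first attribute access, e.g. (assuming sentencepiece / tokenizers are installed)
#   from transformers.models.nllb import NllbTokenizer, NllbTokenizerFast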
| 335
| 0
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowerCamelCase__ ( unittest.TestCase , ToolTesterMixin ):
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
        self.tool = load_tool("""text-classification""" )
        self.tool.setup()
        self.remote_tool = load_tool("""text-classification""" , remote=True )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.tool("""That\'s quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(__lowercase , """positive""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.remote_tool("""That\'s quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(__lowercase , """positive""" )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = self.tool(text="""That\'s quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(__lowercase , """positive""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.remote_tool(text="""That\'s quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(__lowercase , """positive""" )
| 616
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ :Tuple = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[Any] = ['''DeiTFeatureExtractor''']
lowerCAmelCase__ :Union[str, Any] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Tuple = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Union[str, Any] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 618
| 0
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = FileLock(str(tmpdir / "foo.lock" ) )
SCREAMING_SNAKE_CASE_ = FileLock(str(tmpdir / "foo.lock" ) )
SCREAMING_SNAKE_CASE_ = 0.01
with locka.acquire():
with pytest.raises(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = time.time()
locka.acquire(__UpperCamelCase )
assert time.time() - _start > timeout
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = "a" * 1_0_0_0 + ".lock"
SCREAMING_SNAKE_CASE_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(__UpperCamelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
SCREAMING_SNAKE_CASE_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__UpperCamelCase ):
locka.acquire(0 )
| 705
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = logging.get_logger()
# the current default level is logging.WARNING
SCREAMING_SNAKE_CASE_ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(__magic_name__ )
def __A ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = logging.get_verbosity()
SCREAMING_SNAKE_CASE_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
SCREAMING_SNAKE_CASE_ = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(__magic_name__ ) as cl:
logger.warning(__magic_name__ )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(__magic_name__ ) as cl:
logger.warning(__magic_name__ )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(__magic_name__ ) as cl:
logger.warning(__magic_name__ )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(__magic_name__ )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def __A ( self : int ) -> Optional[Any]:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
SCREAMING_SNAKE_CASE_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
SCREAMING_SNAKE_CASE_ = os.getenv("TRANSFORMERS_VERBOSITY" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = logging.log_levels[env_level_str]
SCREAMING_SNAKE_CASE_ = logging.get_verbosity()
self.assertEqual(
__magic_name__ , __magic_name__ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
SCREAMING_SNAKE_CASE_ = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def __A ( self : Dict ) -> List[Any]:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE_ = logging.logging.getLogger()
with CaptureLogger(__magic_name__ ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def __A ( self : Dict ) -> Optional[int]:
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
SCREAMING_SNAKE_CASE_ = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(__magic_name__ ) as cl:
logger.warning_advice(__magic_name__ )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(__magic_name__ ) as cl:
logger.warning_advice(__magic_name__ )
self.assertEqual(cl.out , msg + "\n" )
def a__ ( ):
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 356
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : int=32 * 4 , UpperCAmelCase_ : str=32 * 6 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Tuple=32 , ):
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = use_auxiliary_loss
SCREAMING_SNAKE_CASE : Tuple = num_queries
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Any = min_size
SCREAMING_SNAKE_CASE : Union[str, Any] = max_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : Tuple = mask_feature_size
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase_ ) > 0.5
).float()
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase_ ) > 0.5).long()
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _A ( self : Any ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : Dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _A ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = output.encoder_hidden_states
SCREAMING_SNAKE_CASE : List[Any] = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase_ ) , config.decoder_config.decoder_layers )
def _A ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str]=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = MaskFormerModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Optional[int] = MaskFormerForInstanceSegmentation(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
def comm_check_on_output(UpperCAmelCase_ : int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCAmelCase_ )
comm_check_on_output(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ )
comm_check_on_output(UpperCAmelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCamelCase_ : List[str] = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCamelCase_ : int = False
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : int = False
UpperCamelCase_ : Optional[Any] = False
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Any = MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ )
def _A ( self : List[str] ):
self.config_tester.run_common_tests()
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase_ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def _A ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def _A ( self : Optional[int] ):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def _A ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def _A ( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _A ( self : List[Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self : Tuple ):
pass
def _A ( self : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
@slow
def _A ( self : List[Any] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE : Any = MaskFormerModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : List[str] = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE : Any = {
"pixel_values": torch.randn((2, 3, *size) , device=UpperCAmelCase_ ),
"mask_labels": torch.randn((2, 10, *size) , device=UpperCAmelCase_ ),
"class_labels": torch.zeros(2 , 10 , device=UpperCAmelCase_ ).long(),
}
SCREAMING_SNAKE_CASE : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = model(**UpperCAmelCase_ )
self.assertTrue(outputs.loss is not None )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(UpperCAmelCase_ ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = model(**UpperCAmelCase_ , output_attentions=UpperCAmelCase_ )
self.assertTrue(outputs.attentions is not None )
def _A ( self : Tuple ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE : str = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : Dict = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
SCREAMING_SNAKE_CASE : Dict = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ ).loss
loss.backward()
def _A ( self : Optional[Any] ):
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE : str = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Optional[int] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
SCREAMING_SNAKE_CASE : Tuple = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
SCREAMING_SNAKE_CASE : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
snake_case = 1e-4
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self : Tuple ):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : List[Any] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(UpperCAmelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : int = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(UpperCAmelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(UpperCAmelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(UpperCAmelCase_ )
.eval()
)
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : List[str] = prepare_img()
SCREAMING_SNAKE_CASE : List[str] = image_processor(UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**UpperCAmelCase_ )
# masks_queries_logits
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE : Optional[int] = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(UpperCAmelCase_ ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
# class_queries_logits
SCREAMING_SNAKE_CASE : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(UpperCAmelCase_ )
.eval()
)
SCREAMING_SNAKE_CASE : int = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**UpperCAmelCase_ )
# masks_queries_logits
SCREAMING_SNAKE_CASE : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE : Dict = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(UpperCAmelCase_ ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
# class_queries_logits
SCREAMING_SNAKE_CASE : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(UpperCAmelCase_ )
.eval()
)
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : Dict = inputs["pixel_values"].to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [el.to(UpperCAmelCase_ ) for el in inputs["mask_labels"]]
SCREAMING_SNAKE_CASE : Optional[int] = [el.to(UpperCAmelCase_ ) for el in inputs["class_labels"]]
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**UpperCAmelCase_ )
self.assertTrue(outputs.loss is not None )
| 62
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 62
| 1
|
"""simple docstring"""
def miller_rabin( n , allow_probable = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
    bounds = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
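    # Illustrative decomposition: for n = 221, n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2.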
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 696
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
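# Illustrative (hypothetical) use of an unconditional audio diffusion pipeline like the one
# above: load a trained unet/scheduler pair, then sample a waveform, e.g.
#   pipe = a_.from_pretrained("some/audio-diffusion-checkpoint")
#   audio = pipe().audios[0]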
| 696
| 1
|
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = CpmAntTokenizer
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[str]:
'''simple docstring'''
super().setUp()
lowerCamelCase__: Optional[int] =[
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
lowerCamelCase__: Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@tooslow
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[Any] =CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
lowerCamelCase__: Union[str, Any] ="今天天气真好!"
lowerCamelCase__: int =["今天", "天气", "真", "好", "!"]
lowerCamelCase__: List[str] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Dict ="今天天气真好!"
lowerCamelCase__: str =[tokenizer.bos_token] + tokens
lowerCamelCase__: Union[str, Any] =[6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
lowerCamelCase__: Tuple =tokenizer.decode(UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
| 59
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _UpperCAmelCase ( A , A , A , A=1024 ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ =[], []
UpperCAmelCase__ =list(zip(A , A ) )
UpperCAmelCase__ , UpperCAmelCase__ =sorted_examples[0]
def is_too_big(A ):
return tok(A , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
UpperCAmelCase__ =new_src + " " + src
UpperCAmelCase__ =new_tgt + " " + tgt
if is_too_big(A ) or is_too_big(A ): # cant fit, finalize example
finished_src.append(A )
finished_tgt.append(A )
UpperCAmelCase__ , UpperCAmelCase__ =src, tgt
else: # can fit, keep adding
UpperCAmelCase__ , UpperCAmelCase__ =cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(A )
finished_tgt.append(A )
return finished_src, finished_tgt
def _UpperCAmelCase ( A , A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =Path(A )
save_path.mkdir(exist_ok=A )
for split in ["train"]:
UpperCAmelCase__ , UpperCAmelCase__ =data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
UpperCAmelCase__ =[x.rstrip() for x in Path(A ).open().readlines()]
UpperCAmelCase__ =[x.rstrip() for x in Path(A ).open().readlines()]
UpperCAmelCase__ , UpperCAmelCase__ =pack_examples(A , A , A , A )
print(F"""packed {split} split from {len(A )} examples -> {len(A )}.""" )
Path(save_path / F"""{split}.source""" ).open("w" ).write("\n".join(A ) )
Path(save_path / F"""{split}.target""" ).open("w" ).write("\n".join(A ) )
for split in ["val", "test"]:
UpperCAmelCase__ , UpperCAmelCase__ =data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
shutil.copyfile(A , save_path / F"""{split}.source""" )
shutil.copyfile(A , save_path / F"""{split}.target""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ =argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=A , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=A , default=128 )
parser.add_argument("--data_dir" , type=A )
parser.add_argument("--save_path" , type=A )
UpperCAmelCase__ =parser.parse_args()
UpperCAmelCase__ =AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(A , Path(args.data_dir ) , args.max_seq_len , args.save_path )
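# Example invocation of the CLI above (illustrative script name, paths and arguments):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed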
if __name__ == "__main__":
packer_cli()
| 625
| 0
|
"""simple docstring"""
def factorial ( num ):
    fact = 1
    for i in range(1 ,num + 1 ):
        fact *= i
    return fact
def split_and_add ( number ):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10 # Removing the last_digit from the given number
    return sum_of_digits
def solution ( num = 100 ):
    fact = factorial(num)
    result = split_and_add(fact)
    return result
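# Illustrative check: solution(10) == 27, since 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.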
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 700
|
"""simple docstring"""
import os
import numpy
import onnx
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
A__ = a.name
A__ = b.name
A__ = ''
A__ = ''
A__ = a == b
A__ = name_a
A__ = name_b
return res
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCAmelCase__ ,lowerCAmelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g ,lowerCAmelCase__ ,lowerCAmelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g ,lowerCAmelCase__ ,lowerCAmelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g ,lowerCAmelCase__ ,lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
for n in graph_proto.node:
_node_replace_input_with(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
A__ = list(model.graph.initializer )
A__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
A__ = inits[i].name
A__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph ,lowerCAmelCase__ ,lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ):
A__ = os.path.dirname(lowerCAmelCase__ )
A__ = os.path.basename(lowerCAmelCase__ )
A__ = onnx.load(os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) )
A__ = list(model.graph.initializer )
A__ = set()
A__ = {}
A__ = []
A__ = 0
for i in range(len(lowerCAmelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 ,len(lowerCAmelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] ,inits[j] ):
dup_set.add(lowerCAmelCase__ )
dup_set.add(lowerCAmelCase__ )
A__ = inits[j].data_type
A__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' ,lowerCAmelCase__ )
total_reduced_size += mem_size
A__ = inits[i].name
A__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCAmelCase__ )
else:
A__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' ,total_reduced_size / 1024 / 1024 / 1024 ,'GB' )
A__ = sorted(lowerCAmelCase__ )
_remove_dup_initializers_from_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
A__ = 'optimized_' + model_file_name
A__ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
onnx.save(lowerCAmelCase__ ,lowerCAmelCase__ )
return new_model
| 554
| 0
|
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle (data ) -> list[Any]:
    '''simple docstring'''
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 567
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase : List[str] = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "facebook/nllb-200-distilled-600M"
lowercase__ = (
"This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
"be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
"which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
"plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
)
lowercase__ = "translator"
lowercase__ = AutoTokenizer
lowercase__ = AutoModelForSeqaSeqLM
lowercase__ = LANGUAGE_CODES
lowercase__ = ["text", "text", "text"]
lowercase__ = ["text"]
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''')
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''')
lowercase_ = self.lang_to_code[src_lang]
lowercase_ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCAmelCase_ , return_tensors="""pt""" , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_)
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : int):
"""simple docstring"""
return self.model.generate(**lowerCAmelCase_)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase_)
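# A minimal usage sketch for the translation tool defined above. Hedged assumptions: the class
# corresponds to transformers' `TranslationTool`, the surrounding `Tool` base class wires the
# encode/forward/decode methods together through `__call__`, and the distilled NLLB checkpoint
# can be downloaded. Language names are looked up in the LANGUAGE_CODES mapping above.
#
#     translator = TranslationTool()
#     print(translator("Bonjour à tous", src_lang="French", tgt_lang="English"))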
| 567
| 1
|
def _lowerCamelCase( UpperCamelCase__ : int , UpperCamelCase__ : list ) -> Union[str, Any]:
_enforce_args(UpperCamelCase__ , UpperCamelCase__ )
if n == 0:
return 0
A : Dict = float('''-inf''' )
for i in range(1 , n + 1 ):
A : int = max(
UpperCamelCase__ , prices[i - 1] + naive_cut_rod_recursive(n - i , UpperCamelCase__ ) )
return max_revue
def _lowerCamelCase( UpperCamelCase__ : int , UpperCamelCase__ : list ) -> Any:
_enforce_args(UpperCamelCase__ , UpperCamelCase__ )
A : Union[str, Any] = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowerCamelCase( UpperCamelCase__ : int , UpperCamelCase__ : list , UpperCamelCase__ : list ) -> Any:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
A : int = float('''-inf''' )
for i in range(1 , n + 1 ):
A : Tuple = max(
UpperCamelCase__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , UpperCamelCase__ , UpperCamelCase__ ) , )
A : str = max_revenue
return max_rev[n]
def _lowerCamelCase( UpperCamelCase__ : int , UpperCamelCase__ : list ) -> str:
_enforce_args(UpperCamelCase__ , UpperCamelCase__ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
A : Dict = [float('''-inf''' ) for _ in range(n + 1 )]
A : Dict = 0
for i in range(1 , n + 1 ):
A : Any = max_rev[i]
for j in range(1 , i + 1 ):
A : int = max(UpperCamelCase__ , prices[j - 1] + max_rev[i - j] )
A : Any = max_revenue_i
return max_rev[n]
def _lowerCamelCase( UpperCamelCase__ : int , UpperCamelCase__ : list ) -> Union[str, Any]:
if n < 0:
A : str = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(UpperCamelCase__ )
if n > len(UpperCamelCase__ ):
A : List[str] = (
'''Each integral piece of rod must have a corresponding price. '''
F'''Got n = {n} but length of prices = {len(UpperCamelCase__ )}'''
)
raise ValueError(UpperCamelCase__ )
def _lowerCamelCase( ) -> Optional[Any]:
A : int = [6, 10, 12, 15, 20, 23]
A : Optional[Any] = len(UpperCamelCase__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
A : Tuple = 36
A : Any = top_down_cut_rod(UpperCamelCase__ , UpperCamelCase__ )
A : str = bottom_up_cut_rod(UpperCamelCase__ , UpperCamelCase__ )
A : Any = naive_cut_rod_recursive(UpperCamelCase__ , UpperCamelCase__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 705
|
'''simple docstring'''
def _lowerCamelCase( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> Optional[int]:
A : Optional[int] = 0
A : str = len(UpperCamelCase__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A : Any = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
A : Union[str, Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
A : Tuple = left
A : Tuple = point
elif point > right:
A : Optional[Any] = right
A : str = point
else:
if item < current_item:
A : Tuple = point - 1
else:
A : Optional[Any] = point + 1
return None
def _lowerCamelCase( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ) -> List[Any]:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A : Dict = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__ , UpperCamelCase__ , point + 1 , UpperCamelCase__ )
def _lowerCamelCase( UpperCamelCase__ : Optional[int] ) -> List[Any]:
if collection != sorted(UpperCamelCase__ ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
snake_case_ = 0
if debug == 1:
snake_case_ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
snake_case_ = 67
snake_case_ = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at position: {result}''')
else:
print("""Not found""")
| 537
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : str = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
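# With the lazy-module wiring above, the public ConvBERT names resolve on first attribute access.
# A small illustrative sketch (assumes torch is installed so the modeling classes are exported;
# the randomly initialised model is for demonstration only):
#
#     from transformers import ConvBertConfig, ConvBertModel
#     model = ConvBertModel(ConvBertConfig())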
| 16
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase : str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
lowercase : ClassVar[Features] = Features({'text': Value('string' )} )
lowercase : ClassVar[Features] = Features({'labels': ClassLabel} )
lowercase : str = "text"
lowercase : str = "labels"
def __lowerCamelCase ( self , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __UpperCamelCase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__UpperCamelCase : Any = copy.deepcopy(self )
__UpperCamelCase : Any = self.label_schema.copy()
__UpperCamelCase : List[Any] = features[self.label_column]
__UpperCamelCase : Any = label_schema
return task_template
@property
def __lowerCamelCase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
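# A short usage sketch. Hedged assumptions: the frozen dataclass above corresponds to the
# `TextClassification` task template from datasets, and the first method corresponds to
# `align_with_features`; the obfuscated names stand in for those originals.
#
#     from datasets import ClassLabel, Features, Value
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     template = TextClassification(text_column="text", label_column="labels")
#     print(template.align_with_features(features).label_schema["labels"].names)  # ['neg', 'pos']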
| 327
| 0
|
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _A ( A ,A ) -> List[Any]:
assert isinstance(A ,A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def _A ( A ,A ,A ) -> Tuple:
lowercase = tmp_path / "cache"
lowercase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase = ParquetDatasetReader(A ,cache_dir=A ,keep_in_memory=A ).read()
_check_parquet_dataset(A ,A )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def _A ( A ,A ,A ) -> Optional[Any]:
lowercase = tmp_path / "cache"
lowercase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase = features.copy() if features else default_expected_features
lowercase = (
Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase = ParquetDatasetReader(A ,features=A ,cache_dir=A ).read()
_check_parquet_dataset(A ,A )
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def _A ( A ,A ,A ) -> Optional[int]:
lowercase = tmp_path / "cache"
lowercase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase = ParquetDatasetReader(A ,cache_dir=A ,split=A ).read()
_check_parquet_dataset(A ,A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" ,[str, list] )
def _A ( A ,A ,A ) -> Any:
if issubclass(A ,A ):
lowercase = parquet_path
elif issubclass(A ,A ):
lowercase = [parquet_path]
lowercase = tmp_path / "cache"
lowercase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase = ParquetDatasetReader(A ,cache_dir=A ).read()
_check_parquet_dataset(A ,A )
def _A ( A ,A ,A=("train",) ) -> Any:
assert isinstance(A ,A )
for split in splits:
lowercase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def _A ( A ,A ,A ) -> Union[str, Any]:
lowercase = tmp_path / "cache"
lowercase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase = ParquetDatasetReader(
{"train": parquet_path} ,cache_dir=A ,keep_in_memory=A ).read()
_check_parquet_datasetdict(A ,A )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def _A ( A ,A ,A ) -> Optional[int]:
lowercase = tmp_path / "cache"
lowercase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase = features.copy() if features else default_expected_features
lowercase = (
Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase = ParquetDatasetReader({"train": parquet_path} ,features=A ,cache_dir=A ).read()
_check_parquet_datasetdict(A ,A )
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def _A ( A ,A ,A ) -> List[Any]:
if split:
lowercase = {split: parquet_path}
else:
lowercase = "train"
lowercase = {"train": parquet_path, "test": parquet_path}
lowercase = tmp_path / "cache"
lowercase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase = ParquetDatasetReader(A ,cache_dir=A ).read()
_check_parquet_datasetdict(A ,A ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _A ( A ,A ) -> Union[str, Any]:
lowercase = ParquetDatasetWriter(A ,tmp_path / "foo.parquet" )
assert writer.write() > 0
lowercase = pq.ParquetFile(tmp_path / "foo.parquet" )
lowercase = pf.read()
assert dataset.data.table == output_table
def _A ( A ,A ) -> str:
lowercase = str(shared_datadir / "test_image_rgb.jpg" )
lowercase = {"image": [image_path]}
lowercase = Features({"image": Image()} )
lowercase = Dataset.from_dict(A ,features=A )
lowercase = ParquetDatasetWriter(A ,tmp_path / "foo.parquet" )
assert writer.write() > 0
lowercase = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowercase = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) ,streaming=A ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" ,[
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] ,)
def _A ( A ,A ) -> int:
assert get_writer_batch_size(A ) == expected
| 706
|
'''simple docstring'''
def _A ( A ,A ,A ,A ,A ) -> int:
if index == number_of_items:
return 0
lowercase : Optional[int] = 0
lowercase : Union[str, Any] = 0
lowercase : Dict = knapsack(A ,A ,A ,A ,index + 1 )
if weights[index] <= max_weight:
lowercase : List[str] = values[index] + knapsack(
A ,A ,A ,max_weight - weights[index] ,index + 1 )
return max(A ,A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 425
| 0
|
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def lowercase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> np.ndarray:
_snake_case : str = cva.getAffineTransform(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return cva.warpAffine(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , (rows, cols) )
if __name__ == "__main__":
# read original image
a__ = cva.imread(
str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
)
# turn image in gray scale value
a__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
a__, a__ = gray_img.shape
# set different points to rotate image
a__ = np.array([[50, 50], [2_00, 50], [50, 2_00]], np.floataa)
a__ = np.array([[10, 1_00], [2_00, 50], [1_00, 2_50]], np.floataa)
a__ = np.array([[50, 50], [1_50, 50], [1_20, 2_00]], np.floataa)
a__ = np.array([[10, 1_00], [80, 50], [1_80, 2_50]], np.floataa)
# add all rotated images in a list
a__ = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
a__ = plt.figure(1)
a__ = ["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 477
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class snake_case ( enum.Enum ):
'''simple docstring'''
snake_case_ : Any = 0
snake_case_ : Tuple = 1
snake_case_ : int = 2
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case : int = None
if self.model.config.prefix is not None:
_snake_case : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case : List[str] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case : Optional[int] = self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params)
_snake_case : Dict = {**self._preprocess_params, **preprocess_params}
_snake_case : Optional[int] = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : int=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Tuple=None , **lowerCAmelCase : str , ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : List[Any] = {}
if prefix is not None:
_snake_case : Tuple = prefix
if prefix:
_snake_case : Optional[int] = self.tokenizer(
lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework)
_snake_case : Any = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
""" [None, 'hole']""")
_snake_case : Tuple = handle_long_generation
preprocess_params.update(lowerCAmelCase)
_snake_case : str = generate_kwargs
_snake_case : Optional[int] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
_snake_case : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
_snake_case : Tuple = ReturnType.TENSORS
if return_type is not None:
_snake_case : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
_snake_case : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case : str = self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
if len(lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
_snake_case : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> int:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase)
def __call__( self : Tuple , lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> str:
"""simple docstring"""
return super().__call__(lowerCAmelCase , **lowerCAmelCase)
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : str="" , lowerCAmelCase : Any=None , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
_snake_case : Dict = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework)
_snake_case : Tuple = prompt_text
if handle_long_generation == "hole":
_snake_case : int = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case : Optional[int] = generate_kwargs["""max_new_tokens"""]
else:
_snake_case : Dict = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
_snake_case : List[Any] = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case : Optional[int] = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : int , lowerCAmelCase : Dict , **lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Dict = model_inputs["""input_ids"""]
_snake_case : List[Any] = model_inputs.get("""attention_mask""" , lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Any = 1
else:
_snake_case : List[Any] = input_ids.shape[0]
_snake_case : Tuple = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case : Any = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
_snake_case : Dict = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case : Optional[int] = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case : str = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case : Optional[int] = self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase)
_snake_case : Tuple = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case : List[Any] = generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
_snake_case : Dict = tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]=ReturnType.FULL_TEXT , lowerCAmelCase : Union[str, Any]=True) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = model_outputs["""generated_sequence"""][0]
_snake_case : List[str] = model_outputs["""input_ids"""]
_snake_case : Optional[Any] = model_outputs["""prompt_text"""]
_snake_case : str = generated_sequence.numpy().tolist()
_snake_case : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case : Union[str, Any] = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case : int = self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case : str = 0
else:
_snake_case : List[Any] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
_snake_case : Any = prompt_text + text[prompt_length:]
else:
_snake_case : Union[str, Any] = text[prompt_length:]
_snake_case : List[str] = {"""generated_text""": all_text}
records.append(lowerCAmelCase)
return records
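# A brief usage sketch exercising the text-generation pipeline implemented above. Hedged
# assumptions: the public `pipeline` factory routes "text-generation" to this class, and the
# tiny public checkpoint named below is reachable; the parameter values are illustrative.
#
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="sshleifer/tiny-gpt2")
#     print(generator("Hello, I am", max_new_tokens=10, return_full_text=False))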
| 477
| 1
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Any=1 ) -> Optional[Any]:
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple=0 ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = []
for old_item in old_list:
lowerCAmelCase__ = old_item.replace("in_layers.0" , "norm1" )
lowerCAmelCase__ = new_item.replace("in_layers.2" , "conv1" )
lowerCAmelCase__ = new_item.replace("out_layers.0" , "norm2" )
lowerCAmelCase__ = new_item.replace("out_layers.3" , "conv2" )
lowerCAmelCase__ = new_item.replace("emb_layers.1" , "time_emb_proj" )
lowerCAmelCase__ = new_item.replace("skip_connection" , "conv_shortcut" )
lowerCAmelCase__ = shave_segments(UpperCamelCase_ , n_shave_prefix_segments=UpperCamelCase_ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def _a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]=0 ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = []
for old_item in old_list:
lowerCAmelCase__ = old_item
lowerCAmelCase__ = new_item.replace("norm.weight" , "group_norm.weight" )
lowerCAmelCase__ = new_item.replace("norm.bias" , "group_norm.bias" )
lowerCAmelCase__ = new_item.replace("proj_out.weight" , "proj_attn.weight" )
lowerCAmelCase__ = new_item.replace("proj_out.bias" , "proj_attn.bias" )
lowerCAmelCase__ = shave_segments(UpperCamelCase_ , n_shave_prefix_segments=UpperCamelCase_ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : str=None , UpperCamelCase_ : str=None , UpperCamelCase_ : str=None ) -> Any:
"""simple docstring"""
assert isinstance(UpperCamelCase_ , UpperCamelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowerCAmelCase__ = old_checkpoint[path]
lowerCAmelCase__ = old_tensor.shape[0] // 3
lowerCAmelCase__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowerCAmelCase__ = old_tensor.shape[0] // config["num_head_channels"] // 3
lowerCAmelCase__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = old_tensor.split(channels // num_heads , dim=1 )
lowerCAmelCase__ = query.reshape(UpperCamelCase_ )
lowerCAmelCase__ = key.reshape(UpperCamelCase_ )
lowerCAmelCase__ = value.reshape(UpperCamelCase_ )
for path in paths:
lowerCAmelCase__ = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowerCAmelCase__ = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
lowerCAmelCase__ = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
lowerCAmelCase__ = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowerCAmelCase__ = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowerCAmelCase__ = old_checkpoint[path["old"]][:, :, 0]
else:
lowerCAmelCase__ = old_checkpoint[path["old"]]
def _a ( UpperCamelCase_ : str , UpperCamelCase_ : Tuple ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = {}
lowerCAmelCase__ = checkpoint["time_embed.0.weight"]
lowerCAmelCase__ = checkpoint["time_embed.0.bias"]
lowerCAmelCase__ = checkpoint["time_embed.2.weight"]
lowerCAmelCase__ = checkpoint["time_embed.2.bias"]
lowerCAmelCase__ = checkpoint["input_blocks.0.0.weight"]
lowerCAmelCase__ = checkpoint["input_blocks.0.0.bias"]
lowerCAmelCase__ = checkpoint["out.0.weight"]
lowerCAmelCase__ = checkpoint["out.0.bias"]
lowerCAmelCase__ = checkpoint["out.2.weight"]
lowerCAmelCase__ = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
lowerCAmelCase__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
lowerCAmelCase__ = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(UpperCamelCase_ )
}
# Retrieves the keys for the middle blocks only
lowerCAmelCase__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
lowerCAmelCase__ = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(UpperCamelCase_ )
}
# Retrieves the keys for the output blocks only
lowerCAmelCase__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
lowerCAmelCase__ = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(UpperCamelCase_ )
}
for i in range(1 , UpperCamelCase_ ):
lowerCAmelCase__ = (i - 1) // (config["num_res_blocks"] + 1)
lowerCAmelCase__ = (i - 1) % (config["num_res_blocks"] + 1)
lowerCAmelCase__ = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
lowerCAmelCase__ = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
lowerCAmelCase__ = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
lowerCAmelCase__ = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
lowerCAmelCase__ = renew_resnet_paths(UpperCamelCase_ )
lowerCAmelCase__ = {"old": F"input_blocks.{i}.0", "new": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
lowerCAmelCase__ = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , additional_replacements=[meta_path, resnet_op] , config=UpperCamelCase_ )
if len(UpperCamelCase_ ):
lowerCAmelCase__ = renew_attention_paths(UpperCamelCase_ )
lowerCAmelCase__ = {
"old": F"input_blocks.{i}.1",
"new": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
lowerCAmelCase__ = {
F"input_blocks.{i}.1.qkv.bias": {
"key": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"query": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"value": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"key": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"query": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"value": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=UpperCamelCase_ , config=UpperCamelCase_ , )
lowerCAmelCase__ = middle_blocks[0]
lowerCAmelCase__ = middle_blocks[1]
lowerCAmelCase__ = middle_blocks[2]
lowerCAmelCase__ = renew_resnet_paths(UpperCamelCase_ )
assign_to_checkpoint(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , config=UpperCamelCase_ )
lowerCAmelCase__ = renew_resnet_paths(UpperCamelCase_ )
assign_to_checkpoint(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , config=UpperCamelCase_ )
lowerCAmelCase__ = renew_attention_paths(UpperCamelCase_ )
lowerCAmelCase__ = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , attention_paths_to_split=UpperCamelCase_ , config=UpperCamelCase_ )
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = i // (config["num_res_blocks"] + 1)
lowerCAmelCase__ = i % (config["num_res_blocks"] + 1)
lowerCAmelCase__ = [shave_segments(UpperCamelCase_ , 2 ) for name in output_blocks[i]]
lowerCAmelCase__ = {}
for layer in output_block_layers:
lowerCAmelCase__ , lowerCAmelCase__ = layer.split("." )[0], shave_segments(UpperCamelCase_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCamelCase_ )
else:
lowerCAmelCase__ = [layer_name]
if len(UpperCamelCase_ ) > 1:
lowerCAmelCase__ = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
lowerCAmelCase__ = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
lowerCAmelCase__ = renew_resnet_paths(UpperCamelCase_ )
lowerCAmelCase__ = renew_resnet_paths(UpperCamelCase_ )
lowerCAmelCase__ = {"old": F"output_blocks.{i}.0", "new": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , additional_replacements=[meta_path] , config=UpperCamelCase_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowerCAmelCase__ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
lowerCAmelCase__ = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
lowerCAmelCase__ = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(UpperCamelCase_ ) == 2:
lowerCAmelCase__ = []
if len(UpperCamelCase_ ):
lowerCAmelCase__ = renew_attention_paths(UpperCamelCase_ )
lowerCAmelCase__ = {
"old": F"output_blocks.{i}.1",
"new": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
lowerCAmelCase__ = {
F"output_blocks.{i}.1.qkv.bias": {
"key": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"query": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"value": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"key": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"query": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"value": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=UpperCamelCase_ , )
else:
lowerCAmelCase__ = renew_resnet_paths(UpperCamelCase_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCAmelCase__ = ".".join(["output_blocks", str(UpperCamelCase_ ), path["old"]] )
lowerCAmelCase__ = ".".join(["up_blocks", str(UpperCamelCase_ ), "resnets", str(UpperCamelCase_ ), path["new"]] )
lowerCAmelCase__ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
a_ = parser.parse_args()
a_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
a_ = json.loads(f.read())
a_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
a_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
a_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
a_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
a_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
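# Hypothetical command-line invocation of the conversion script above. The script file name and
# all paths are placeholders; only the flags match the argparse definitions.
#
#     python convert_ldm_checkpoint.py \
#         --checkpoint_path ./ldm/model.ckpt \
#         --config_file ./ldm/config.json \
#         --dump_path ./converted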
| 720
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 115
| 0
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : Union[str, Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_snake_case : Tuple = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
_snake_case : Optional[Any] = {"facebook/blenderbot-3B": 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __snake_case ( ):
'''simple docstring'''
lowercase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
lowercase = bs[:]
lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__magic_name__ )
cs.append(2**8 + n )
n += 1
lowercase = [chr(__magic_name__ ) for n in cs]
return dict(zip(__magic_name__ , __magic_name__ ) )
def __snake_case ( __magic_name__ ):
'''simple docstring'''
lowercase = set()
lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase = char
return pairs
class UpperCamelCase_ ( __a ):
'''simple docstring'''
UpperCamelCase : Tuple = VOCAB_FILES_NAMES
UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int]="replace" , lowerCAmelCase__ :Dict="<s>" , lowerCAmelCase__ :Optional[int]="</s>" , lowerCAmelCase__ :int="</s>" , lowerCAmelCase__ :Any="<s>" , lowerCAmelCase__ :str="<unk>" , lowerCAmelCase__ :str="<pad>" , lowerCAmelCase__ :List[Any]="<mask>" , lowerCAmelCase__ :Dict=False , **lowerCAmelCase__ :Optional[Any] , ) ->Tuple:
lowercase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
lowercase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
lowercase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
lowercase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
lowercase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
lowercase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
lowercase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8" ) as vocab_handle:
lowercase = json.load(lowerCAmelCase__ )
lowercase = {v: k for k, v in self.encoder.items()}
lowercase = errors # how to handle errors in decoding
lowercase = bytes_to_unicode()
lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8" ) as merges_handle:
lowercase = merges_handle.read().split("\n" )[1:-1]
lowercase = [tuple(merge.split() ) for merge in bpe_merges]
lowercase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
lowercase = {}
lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def SCREAMING_SNAKE_CASE( self :List[str] ) ->Optional[Any]:
return len(self.encoder )
def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE( self :List[Any] , lowerCAmelCase__ :Optional[int] ) ->Optional[int]:
if token in self.cache:
return self.cache[token]
lowercase = tuple(lowerCAmelCase__ )
lowercase = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowercase = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase = bigram
lowercase = []
lowercase = 0
while i < len(lowerCAmelCase__ ):
try:
lowercase = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase = tuple(lowerCAmelCase__ )
lowercase = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowercase = get_pairs(lowerCAmelCase__ )
lowercase = " ".join(lowerCAmelCase__ )
lowercase = word
return word
def SCREAMING_SNAKE_CASE( self :int , lowerCAmelCase__ :Optional[Any] ) ->Tuple:
lowercase = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE( self :Optional[Any] , lowerCAmelCase__ :int ) ->List[Any]:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE( self :str , lowerCAmelCase__ :Tuple ) ->str:
return self.decoder.get(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE( self :Tuple , lowerCAmelCase__ :Optional[Any] ) ->int:
lowercase = "".join(lowerCAmelCase__ )
lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE( self :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowercase = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowercase = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE( self :int , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def SCREAMING_SNAKE_CASE( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) ->List[int]:
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE( self :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any]=False , **lowerCAmelCase__ :Optional[Any] ) ->Tuple:
lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowercase = " " + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE( self :Tuple , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) ->Any:
return token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE( self :List[str] , lowerCAmelCase__ :"Conversation" ) ->List[int]:
lowercase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase__ )
lowercase = " ".join(lowerCAmelCase__ )
lowercase = self.encode(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > self.model_max_length:
lowercase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
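# A short usage sketch. Hedged assumptions: the class above corresponds to the slow
# `BlenderbotTokenizer`, and the published vocab/merges files for facebook/blenderbot-3B
# (referenced in the maps above) can be downloaded.
#
#     from transformers import BlenderbotTokenizer
#     tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#     ids = tok(" Hello, how are you?")["input_ids"]
#     print(tok.decode(ids, skip_special_tokens=True))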
| 441
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case : Union[str, Any] = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Union[str, Any] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_snake_case : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 441
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Any = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : int = 0
while number > 0:
lowercase__ : List[str] = number % 10
sum_of_digits += last_digit
lowercase__ : Any = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : List[str] = factorial(_lowerCAmelCase )
lowercase__ : Tuple = split_and_add(_lowerCAmelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 711
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
| 0
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 503
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
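# Benchmarks `Dataset.map` / `Dataset.filter` throughput on a synthetic 500k-example dataset
# and writes the collected timings to a JSON results file.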
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    """Time `Dataset.map` via the `get_duration` decorator (intentionally shadows the builtin)."""
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    """Time `Dataset.filter` via the `get_duration` decorator (intentionally shadows the builtin)."""
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES)
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)
        def tokenize(examples):
            return tokenizer(examples["text"])
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 240
| 0
|
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    # Returns a list of (story, 1st continuation, 2nd continuation, label) tuples.
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    # Pre-processes datasets of (story, cont1, cont2, label) tuples into tensors of shape (n_batch, 2, input_len).
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=lowerCamelCase , default="""openai-gpt""" , help="""pretrained model name""")
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""")
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""")
parser.add_argument(
"""--output_dir""" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=lowerCamelCase , default="""""")
parser.add_argument("""--eval_dataset""" , type=lowerCamelCase , default="""""")
parser.add_argument("""--seed""" , type=lowerCamelCase , default=42)
parser.add_argument("""--num_train_epochs""" , type=lowerCamelCase , default=3)
parser.add_argument("""--train_batch_size""" , type=lowerCamelCase , default=8)
parser.add_argument("""--eval_batch_size""" , type=lowerCamelCase , default=16)
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=lowerCamelCase , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , type=lowerCamelCase , default=1)
parser.add_argument(
"""--max_steps""" , default=-1 , type=lowerCamelCase , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowerCamelCase , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=lowerCamelCase , default=6.25E-5)
parser.add_argument("""--warmup_steps""" , default=0 , type=lowerCamelCase , help="""Linear warmup over warmup_steps.""")
parser.add_argument("""--lr_schedule""" , type=lowerCamelCase , default="""warmup_linear""")
parser.add_argument("""--weight_decay""" , type=lowerCamelCase , default=0.01)
parser.add_argument("""--lm_coef""" , type=lowerCamelCase , default=0.9)
parser.add_argument("""--n_valid""" , type=lowerCamelCase , default=374)
parser.add_argument("""--server_ip""" , type=lowerCamelCase , default="""""" , help="""Can be used for distant debugging.""")
parser.add_argument("""--server_port""" , type=lowerCamelCase , default="""""" , help="""Can be used for distant debugging.""")
A_ : str = parser.parse_args()
print(lowerCamelCase)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""")
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
A_ : Optional[int] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
A_ : List[str] = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(lowerCamelCase , lowerCamelCase))
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
A_ : int = ["""_start_""", """_delimiter_""", """_classify_"""]
A_ : Any = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(lowerCamelCase)
A_ : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCamelCase)
A_ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(lowerCamelCase))
model.to(lowerCamelCase)
# Load and encode the datasets
def tokenize_and_encode(lowerCamelCase : List[Any]):
if isinstance(lowerCamelCase , lowerCamelCase):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCamelCase))
elif isinstance(lowerCamelCase , lowerCamelCase):
return obj
return [tokenize_and_encode(lowerCamelCase) for o in obj]
logger.info("""Encoding dataset...""")
A_ : Dict = load_rocstories_dataset(args.train_dataset)
A_ : Optional[Any] = load_rocstories_dataset(args.eval_dataset)
A_ : Union[str, Any] = (train_dataset, eval_dataset)
A_ : Optional[int] = tokenize_and_encode(lowerCamelCase)
# Compute the max input length for the Transformer
A_ : int = model.config.n_positions // 2 - 2
A_ : Union[str, Any] = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
A_ : Tuple = min(lowerCamelCase , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
A_ : Dict = pre_process_datasets(lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase)
A_ : Optional[Any] = tensor_datasets[0], tensor_datasets[1]
A_ : List[Any] = TensorDataset(*lowerCamelCase)
A_ : Tuple = RandomSampler(lowerCamelCase)
A_ : Any = DataLoader(lowerCamelCase , sampler=lowerCamelCase , batch_size=args.train_batch_size)
A_ : List[Any] = TensorDataset(*lowerCamelCase)
A_ : Tuple = SequentialSampler(lowerCamelCase)
A_ : int = DataLoader(lowerCamelCase , sampler=lowerCamelCase , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
A_ : str = args.max_steps
A_ : Union[str, Any] = args.max_steps // (len(lowerCamelCase) // args.gradient_accumulation_steps) + 1
else:
A_ : List[Any] = len(lowerCamelCase) // args.gradient_accumulation_steps * args.num_train_epochs
A_ : Union[str, Any] = list(model.named_parameters())
A_ : Optional[Any] = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
A_ : List[Any] = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], """weight_decay""": 0.0},
]
A_ : Union[str, Any] = AdamW(lowerCamelCase , lr=args.learning_rate , eps=args.adam_epsilon)
A_ : Any = get_linear_schedule_with_warmup(
lowerCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCamelCase)
if args.do_train:
A_ : int = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc="""Epoch"""):
A_ : List[str] = 0
A_ : str = 0
A_ : Union[str, Any] = tqdm(lowerCamelCase , desc="""Training""")
for step, batch in enumerate(lowerCamelCase):
A_ : Any = tuple(t.to(lowerCamelCase) for t in batch)
A_ : List[str] = batch
A_ : Any = model(lowerCamelCase , mc_token_ids=lowerCamelCase , lm_labels=lowerCamelCase , mc_labels=lowerCamelCase)
A_ : Dict = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
A_ : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
A_ : Any = """Training loss: {:.2e} lr: {:.2e}""".format(lowerCamelCase , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
A_ : Optional[int] = model.module if hasattr(lowerCamelCase , """module""") else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
A_ : Dict = os.path.join(args.output_dir , lowerCamelCase)
A_ : int = os.path.join(args.output_dir , lowerCamelCase)
torch.save(model_to_save.state_dict() , lowerCamelCase)
model_to_save.config.to_json_file(lowerCamelCase)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
A_ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
A_ : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(lowerCamelCase)
if args.do_eval:
model.eval()
A_ : Any = 0, 0
A_ : Any = 0, 0
for batch in tqdm(lowerCamelCase , desc="""Evaluating"""):
A_ : List[Any] = tuple(t.to(lowerCamelCase) for t in batch)
A_ : int = batch
with torch.no_grad():
A_ : Any = model(
lowerCamelCase , mc_token_ids=lowerCamelCase , lm_labels=lowerCamelCase , mc_labels=lowerCamelCase)
A_ : Tuple = mc_logits.detach().cpu().numpy()
A_ : Union[str, Any] = mc_labels.to("""cpu""").numpy()
A_ : Dict = accuracy(lowerCamelCase , lowerCamelCase)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
A_ : Optional[Any] = eval_loss / nb_eval_steps
A_ : Union[str, Any] = eval_accuracy / nb_eval_examples
A_ : Tuple = tr_loss / nb_tr_steps if args.do_train else None
A_ : str = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
A_ : Tuple = os.path.join(args.output_dir , """eval_results.txt""")
with open(lowerCamelCase , """w""") as writer:
logger.info("""***** Eval results *****""")
for key in sorted(result.keys()):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key]))
writer.write("""%s = %s\n""" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 720
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) : str = config_and_inputs
A_ : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27
| 0
|
def valid_coloring(neighbours, colored_vertices, color):
    # A vertex may take `color` only if none of its already-colored neighbours uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))
def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
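# Illustrative usage only (the example graph below is an assumption, not from the source):
# try to 3-color a small undirected graph given as an adjacency matrix.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(example_graph, 3))  # prints a list of vertex colors, e.g. [0, 1, 0, 1, 0]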
| 39
|
def binomial_coefficient(n, r):
    # Bottom-up Pascal's rule: after processing row i, c[j] holds C(i, j).
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
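# Expected output: 252, i.e. C(10, 5).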
| 39
| 1
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_DESCRIPTION = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_KWARGS_DESCRIPTION = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string'),
'references': datasets.Value('string'),
}), homepage='https://github.com/hendrycks/math', codebase_urls=['https://github.com/hendrycks/math'], )
    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing predictions and references."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
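# Illustrative usage (an assumption, not from the source; requires the `math_equivalence` package):
# metric = CompetitionMathMetric()
# print(metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]))  # -> {'accuracy': 1.0}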
| 557
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
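# Maps fairseq parameter names to their Hugging Face UniSpeechSat counterparts.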
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
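# These parameters live at the top level of the HF model and are mapped without the "unispeech_sat." prefix.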
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def _snake_case (_snake_case : List[str] , _snake_case : Union[str, Any]) -> List[Any]:
_lowercase =[]
_lowercase =fairseq_model.state_dict()
_lowercase =hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_lowercase =False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == 'group' , )
_lowercase =True
else:
for key, mapped_key in MAPPING.items():
_lowercase ='unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.')[:-1]) != key):
# special case since naming is very similar
continue
_lowercase =True
if "*" in mapped_key:
_lowercase =name.split(_snake_case)[0].split('.')[-2]
_lowercase =mapped_key.replace('*' , _snake_case)
if "weight_g" in name:
_lowercase ='weight_g'
elif "weight_v" in name:
_lowercase ='weight_v'
elif "bias" in name:
_lowercase ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowercase ='weight'
else:
_lowercase =None
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case)
continue
if not is_used:
unused_weights.append(_snake_case)
logger.warning(f'''Unused weights: {unused_weights}''')
def _snake_case (_snake_case : Any , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Any , _snake_case : int) -> Optional[Any]:
_lowercase =full_name.split('conv_layers.')[-1]
_lowercase =name.split('.')
_lowercase =int(items[0])
_lowercase =int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
_lowercase =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
_lowercase =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''')
_lowercase =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''')
_lowercase =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
unused_weights.append(_snake_case)
@torch.no_grad()
def _snake_case (_snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Union[str, Any]=None , _snake_case : str=None , _snake_case : Union[str, Any]=True) -> Dict:
if config_path is not None:
_lowercase =UniSpeechSatConfig.from_pretrained(_snake_case)
else:
_lowercase =UniSpeechSatConfig()
_lowercase =''
if is_finetuned:
_lowercase =UniSpeechSatForCTC(_snake_case)
else:
_lowercase =UniSpeechSatForPreTraining(_snake_case)
_lowercase , _lowercase , _lowercase =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
_lowercase =model[0].eval()
recursively_load_weights(_snake_case , _snake_case)
hf_wavavec.save_pretrained(_snake_case)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 557
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
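# For example (illustrative numbers): a 30 s clip at 16 kHz (480_000 samples) with max_length=20.0
# is cropped to a random 320_000-sample window; shorter clips pass through unchanged.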
@dataclass
class __magic_name__ :
"""simple docstring"""
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__UpperCamelCase = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__UpperCamelCase = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__UpperCamelCase = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__UpperCamelCase = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__UpperCamelCase = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class __magic_name__ :
"""simple docstring"""
__UpperCamelCase = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , snake_case , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def __snake_case ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A_ , A_ , A_ : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A_ , A_ , A_ : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _lowerCAmelCase , _lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A_ : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(_lowerCAmelCase )
transformers.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
A_ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A_ : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
A_ : Optional[Any] = DatasetDict()
A_ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
A_ : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
A_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
A_ : int = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
A_ : Union[str, Any] = feature_extractor.model_input_names[0]
def train_transforms(_lowerCAmelCase : Optional[Any] ):
A_ : Any = []
for audio in batch[data_args.audio_column_name]:
A_ : Dict = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_lowerCAmelCase )
A_ : int = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
A_ : Any = {model_input_name: inputs.get(_lowerCAmelCase )}
A_ : Optional[Any] = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_lowerCAmelCase : Union[str, Any] ):
A_ : int = [audio["array"] for audio in batch[data_args.audio_column_name]]
A_ : int = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
A_ : Union[str, Any] = {model_input_name: inputs.get(_lowerCAmelCase )}
A_ : Tuple = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A_ : Dict = raw_datasets["train"].features[data_args.label_column_name].names
A_ , A_ : int = {}, {}
for i, label in enumerate(_lowerCAmelCase ):
A_ : str = str(_lowerCAmelCase )
A_ : List[str] = label
# Load the accuracy metric from the datasets package
A_ : Dict = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_lowerCAmelCase : Optional[int] ):
A_ : List[str] = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_lowerCAmelCase , references=eval_pred.label_ids )
A_ : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel=_lowerCAmelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A_ : Union[str, Any] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
A_ : Dict = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A_ : int = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase )
# Initialize our trainer
A_ : Optional[int] = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
# Training
if training_args.do_train:
A_ : Tuple = None
if training_args.resume_from_checkpoint is not None:
A_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A_ : int = last_checkpoint
A_ : Dict = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A_ : int = trainer.evaluate()
trainer.log_metrics("eval" , _lowerCAmelCase )
trainer.save_metrics("eval" , _lowerCAmelCase )
# Write model card and (optionally) push to hub
A_ : str = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCAmelCase )
else:
trainer.create_model_card(**_lowerCAmelCase )
if __name__ == "__main__":
main()
| 454
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
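# The helpers below rename latent-diffusion (CompVis) UNet state-dict keys into the diffusers layout.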
def shave_segments(path, n_shave_prefix_segments=1):
    # Removes `n_shave_prefix_segments` leading (or, if negative, trailing) dot-separated segments.
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    # Builds old->new key mappings for ResNet blocks.
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    # Builds old->new key mappings for attention blocks.
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
A_ : Any = {}
A_ : Union[str, Any] = checkpoint["time_embed.0.weight"]
A_ : List[Any] = checkpoint["time_embed.0.bias"]
A_ : Any = checkpoint["time_embed.2.weight"]
A_ : Optional[Any] = checkpoint["time_embed.2.bias"]
A_ : List[str] = checkpoint["input_blocks.0.0.weight"]
A_ : Union[str, Any] = checkpoint["input_blocks.0.0.bias"]
A_ : str = checkpoint["out.0.weight"]
A_ : Optional[int] = checkpoint["out.0.bias"]
A_ : Optional[int] = checkpoint["out.2.weight"]
A_ : Optional[int] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
A_ : List[str] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
A_ : Union[str, Any] = {
layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
for layer_id in range(_lowerCAmelCase )
}
# Retrieves the keys for the middle blocks only
A_ : Union[str, Any] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
A_ : Tuple = {
layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
for layer_id in range(_lowerCAmelCase )
}
# Retrieves the keys for the output blocks only
A_ : Dict = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
A_ : Optional[Any] = {
layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
for layer_id in range(_lowerCAmelCase )
}
for i in range(1 , _lowerCAmelCase ):
A_ : Union[str, Any] = (i - 1) // (config["num_res_blocks"] + 1)
A_ : Optional[int] = (i - 1) % (config["num_res_blocks"] + 1)
A_ : str = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
A_ : List[Any] = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
if f"input_blocks.{i}.0.op.weight" in checkpoint:
A_ : Tuple = checkpoint[
f"input_blocks.{i}.0.op.weight"
]
A_ : List[Any] = checkpoint[
f"input_blocks.{i}.0.op.bias"
]
continue
A_ : List[Any] = renew_resnet_paths(_lowerCAmelCase )
A_ : Any = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
A_ : Any = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path, resnet_op] , config=_lowerCAmelCase )
if len(_lowerCAmelCase ):
A_ : Tuple = renew_attention_paths(_lowerCAmelCase )
A_ : List[str] = {
"old": f"input_blocks.{i}.1",
"new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
A_ : Any = {
f"input_blocks.{i}.1.qkv.bias": {
"key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"input_blocks.{i}.1.qkv.weight": {
"key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=_lowerCAmelCase , config=_lowerCAmelCase , )
A_ : List[Any] = middle_blocks[0]
A_ : Tuple = middle_blocks[1]
A_ : Dict = middle_blocks[2]
A_ : str = renew_resnet_paths(_lowerCAmelCase )
assign_to_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , config=_lowerCAmelCase )
A_ : Tuple = renew_resnet_paths(_lowerCAmelCase )
assign_to_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , config=_lowerCAmelCase )
A_ : Any = renew_attention_paths(_lowerCAmelCase )
A_ : Optional[int] = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , attention_paths_to_split=_lowerCAmelCase , config=_lowerCAmelCase )
for i in range(_lowerCAmelCase ):
A_ : Optional[int] = i // (config["num_res_blocks"] + 1)
A_ : Optional[Any] = i % (config["num_res_blocks"] + 1)
A_ : Optional[Any] = [shave_segments(_lowerCAmelCase , 2 ) for name in output_blocks[i]]
A_ : Union[str, Any] = {}
for layer in output_block_layers:
A_ , A_ : str = layer.split("." )[0], shave_segments(_lowerCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(_lowerCAmelCase )
else:
A_ : List[str] = [layer_name]
if len(_lowerCAmelCase ) > 1:
A_ : Dict = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
A_ : List[Any] = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
A_ : Optional[Any] = renew_resnet_paths(_lowerCAmelCase )
A_ : Any = renew_resnet_paths(_lowerCAmelCase )
A_ : Optional[Any] = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path] , config=_lowerCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
A_ : int = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
A_ : Union[str, Any] = checkpoint[
f"output_blocks.{i}.{index}.conv.weight"
]
A_ : int = checkpoint[
f"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(_lowerCAmelCase ) == 2:
A_ : Union[str, Any] = []
if len(_lowerCAmelCase ):
A_ : Dict = renew_attention_paths(_lowerCAmelCase )
A_ : Union[str, Any] = {
"old": f"output_blocks.{i}.1",
"new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
A_ : Any = {
f"output_blocks.{i}.1.qkv.bias": {
"key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"output_blocks.{i}.1.qkv.weight": {
"key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=_lowerCAmelCase , )
else:
A_ : Optional[int] = renew_resnet_paths(_lowerCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
A_ : List[Any] = ".".join(["output_blocks", str(_lowerCAmelCase ), path["old"]] )
A_ : Optional[Any] = ".".join(["up_blocks", str(_lowerCAmelCase ), "resnets", str(_lowerCAmelCase ), path["new"]] )
A_ : Optional[int] = checkpoint[old_path]
return new_checkpoint
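# Key renamings performed by convert_ldm_checkpoint above (original LDM layout -> diffusers UNet layout):
#   input_blocks.<i>.0 / .1  -> down_blocks.<b>.resnets.<j> / down_blocks.<b>.attentions.<j>
#   middle_block.0 / 1 / 2   -> mid_block.resnets.0 / mid_block.attentions.0 / mid_block.resnets.1
#   output_blocks.<i>.0 / .1 -> up_blocks.<b>.resnets.<j> / up_blocks.<b>.attentions.<j>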
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
_lowerCAmelCase : Optional[Any] = parser.parse_args()
_lowerCAmelCase : Optional[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowerCAmelCase : str = json.loads(f.read())
_lowerCAmelCase : Tuple = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowerCAmelCase : Optional[Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowerCAmelCase : List[str] = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_lowerCAmelCase : List[str] = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_lowerCAmelCase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
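# Example invocation (script name and paths are illustrative; the flags match the parser defined above):
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --dump_path ./converted_model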
| 454
| 1
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __UpperCamelCase ( lowercase__ , lowercase__ ):
lowercase : Union[str, Any] = 1
@register_to_config
def __init__( self :str ,_UpperCamelCase :Optional[Any]=2_0_0_0 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :Optional[int]=2_0 ,_UpperCamelCase :Dict=1E-3 ):
snake_case_ : str = None
snake_case_ : Dict = None
snake_case_ : List[Any] = None
def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Union[str, torch.device] = None ):
snake_case_ : Any = torch.linspace(1 ,self.config.sampling_eps ,_UpperCamelCase ,device=_UpperCamelCase )
def a__ ( self :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Dict ,_UpperCamelCase :str=None ):
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
snake_case_ : Optional[Any] = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
snake_case_ : Dict = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
snake_case_ : List[str] = std.flatten()
while len(std.shape ) < len(score.shape ):
snake_case_ : Optional[int] = std.unsqueeze(-1 )
snake_case_ : Any = -score / std
# compute
snake_case_ : Dict = -1.0 / len(self.timesteps )
snake_case_ : Union[str, Any] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
snake_case_ : str = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
snake_case_ : Union[str, Any] = beta_t.unsqueeze(-1 )
snake_case_ : List[Any] = -0.5 * beta_t * x
snake_case_ : List[Any] = torch.sqrt(_UpperCamelCase )
snake_case_ : Union[str, Any] = drift - diffusion**2 * score
snake_case_ : int = x + drift * dt
# add noise
snake_case_ : Tuple = randn_tensor(x.shape ,layout=x.layout ,generator=_UpperCamelCase ,device=x.device ,dtype=x.dtype )
snake_case_ : Optional[Any] = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :Union[str, Any] ):
return self.config.num_train_timesteps
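# Note on the update rule in the prediction step above (reverse-time VP-SDE, Euler-Maruyama discretization):
#   drift  = -1/2 * beta(t) * x - beta(t) * score(x, t)   (score = model output rescaled by the marginal std)
#   x_mean = x + drift * dt                                (deterministic part, dt < 0)
#   x      = x_mean + sqrt(beta(t)) * sqrt(-dt) * z,  z ~ N(0, I)
# following Song et al., "Score-Based Generative Modeling through Stochastic Differential Equations".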
| 267
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
lowercase : List[str] = KandinskyVaaPriorPipeline
lowercase : Optional[Any] = ['prompt']
lowercase : Dict = ['prompt', 'negative_prompt']
lowercase : Dict = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
lowercase : int = False
@property
def a__ ( self :int ):
return 3_2
@property
def a__ ( self :Any ):
return 3_2
@property
def a__ ( self :Union[str, Any] ):
return self.time_input_dim
@property
def a__ ( self :int ):
return self.time_input_dim * 4
@property
def a__ ( self :Dict ):
return 1_0_0
@property
def a__ ( self :List[Any] ):
snake_case_ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def a__ ( self :int ):
torch.manual_seed(0 )
snake_case_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(_UpperCamelCase )
@property
def a__ ( self :Dict ):
torch.manual_seed(0 )
snake_case_ : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 1_2,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
snake_case_ : int = PriorTransformer(**_UpperCamelCase )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't
snake_case_ : Any = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def a__ ( self :Optional[int] ):
torch.manual_seed(0 )
snake_case_ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=2_2_4 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1_4 ,)
snake_case_ : Optional[Any] = CLIPVisionModelWithProjection(_UpperCamelCase )
return model
@property
def a__ ( self :List[Any] ):
snake_case_ : Any = CLIPImageProcessor(
crop_size=2_2_4 ,do_center_crop=_UpperCamelCase ,do_normalize=_UpperCamelCase ,do_resize=_UpperCamelCase ,image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,resample=3 ,size=2_2_4 ,)
return image_processor
def a__ ( self :List[Any] ):
snake_case_ : Tuple = self.dummy_prior
snake_case_ : Any = self.dummy_image_encoder
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : Any = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_image_processor
snake_case_ : Tuple = UnCLIPScheduler(
variance_type="""fixed_small_log""" ,prediction_type="""sample""" ,num_train_timesteps=1_0_0_0 ,clip_sample=_UpperCamelCase ,clip_sample_range=10.0 ,)
snake_case_ : Tuple = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def a__ ( self :Dict ,_UpperCamelCase :str ,_UpperCamelCase :Union[str, Any]=0 ):
if str(_UpperCamelCase ).startswith("""mps""" ):
snake_case_ : Optional[int] = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def a__ ( self :int ):
snake_case_ : Union[str, Any] = """cpu"""
snake_case_ : int = self.get_dummy_components()
snake_case_ : int = self.pipeline_class(**_UpperCamelCase )
snake_case_ : Union[str, Any] = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ : Any = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
snake_case_ : List[Any] = output.image_embeds
snake_case_ : List[Any] = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) ,return_dict=_UpperCamelCase ,)[0]
snake_case_ : Dict = image[0, -1_0:]
snake_case_ : Optional[Any] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
snake_case_ : Optional[int] = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a__ ( self :List[Any] ):
snake_case_ : Any = torch_device == """cpu"""
snake_case_ : Any = True
snake_case_ : Dict = False
self._test_inference_batch_single_identical(
test_max_difference=_UpperCamelCase ,relax_max_difference=_UpperCamelCase ,test_mean_pixel_difference=_UpperCamelCase ,)
@skip_mps
def a__ ( self :str ):
snake_case_ : str = torch_device == """cpu"""
snake_case_ : List[str] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_UpperCamelCase ,test_mean_pixel_difference=_UpperCamelCase ,)
| 267
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {"facebook/bart-base": BartForConditionalGeneration}
SCREAMING_SNAKE_CASE : List[str] = {"facebook/bart-base": BartTokenizer}
def UpperCamelCase_( ) -> Tuple:
_lowercase : Optional[Any] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=lowerCamelCase_ , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=lowerCamelCase_ , default=lowerCamelCase_ , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowerCamelCase_ , )
parser.add_argument(
'--config_name' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=lowerCamelCase_ , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='Where to store the final ONNX file.' )
_lowercase : Optional[Any] = parser.parse_args()
return args
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_="cpu" ) -> str:
_lowercase : List[Any] = model_dict[model_name].from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ )
_lowercase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(lowerCamelCase_ )
if model_name in ["facebook/bart-base"]:
_lowercase : Optional[int] = 0
_lowercase : Any = None
_lowercase : List[str] = 0
return huggingface_model, tokenizer
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
model.eval()
_lowercase : str = None
_lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(lowerCamelCase_ ) )
with torch.no_grad():
_lowercase : str = 'My friends are cool but they eat too many carbs.'
_lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='pt' ).to(model.device )
_lowercase : Optional[Any] = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=lowerCamelCase_ , max_length=lowerCamelCase_ , early_stopping=lowerCamelCase_ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowerCamelCase_ , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowerCamelCase_ , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=lowerCamelCase_ , )
logger.info('Model exported to {}'.format(lowerCamelCase_ ) )
_lowercase : Any = remove_dup_initializers(os.path.abspath(lowerCamelCase_ ) )
logger.info('Deduplicated and optimized model written to {}'.format(lowerCamelCase_ ) )
_lowercase : int = onnxruntime.InferenceSession(lowerCamelCase_ )
_lowercase : Any = ort_sess.run(
lowerCamelCase_ , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(lowerCamelCase_ ),
'max_length': np.array(lowerCamelCase_ ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def UpperCamelCase_( ) -> Tuple:
_lowercase : int = parse_args()
_lowercase : Optional[Any] = 5
_lowercase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowercase : Optional[int] = torch.device(args.device )
_lowercase , _lowercase : List[str] = load_model_tokenizer(args.model_name_or_path , lowerCamelCase_ )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(lowerCamelCase_ )
if args.max_length:
_lowercase : int = args.max_length
if args.num_beams:
_lowercase : Union[str, Any] = args.num_beams
if args.output_file_path:
_lowercase : List[Any] = args.output_file_path
else:
_lowercase : List[Any] = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
main()
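# Example invocation (script name and output path are illustrative; the flags match the parser defined above):
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --output_file_path BART.onnx \
#       --device cpu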
| 89
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase ( self, lowerCamelCase=0) -> str:
"""simple docstring"""
_lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
_lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Tuple = pipe(**lowerCamelCase).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs()
_lowercase : Any = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : int = self.get_dummy_inputs()
_lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
_lowercase : Union[str, Any] = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Tuple = text_inputs['input_ids']
_lowercase : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
_lowercase : List[Any] = prompt_embeds
# forward
_lowercase : Union[str, Any] = pipe(**lowerCamelCase)
_lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Any = 3 * ['this is a negative prompt']
_lowercase : str = negative_prompt
_lowercase : Optional[int] = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : str = output.images[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : str = 3 * [inputs.pop('prompt')]
_lowercase : Optional[int] = []
for p in [prompt, negative_prompt]:
_lowercase : Tuple = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Dict = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
_lowercase , _lowercase : str = embeds
# forward
_lowercase : Dict = pipe(**lowerCamelCase)
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : int = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0)
_lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'open neural network exchange'
_lowercase : List[Any] = np.random.RandomState(0)
_lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = 'open neural network exchange'
_lowercase : str = np.random.RandomState(0)
_lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> int:
"""simple docstring"""
        number_of_steps = 0
        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
        test_callback_fn.has_been_called = False
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = 'Andromeda galaxy in a bottle'
_lowercase : str = np.random.RandomState(0)
pipe(
prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(lowerCamelCase, lowerCamelCase)
assert pipe.safety_checker is None
_lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
| 89
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
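# Usage note (sketch): with _LazyModule, importing e.g. ResNetModel from this package resolves the heavy
# framework-specific modeling file only when the attribute is first accessed, and only if the corresponding
# backend (torch, TF, or Flax) is installed.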
| 712
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
SCREAMING_SNAKE_CASE_ = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
a_ :List[Any] =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a_ :List[Any] =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a_ :List[Any] ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a_ :Tuple ={
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __a ( self : int ):
'''simple docstring'''
__a = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
__a = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}] )
__a = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
__a = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
# Legacy behavior
__a = text_classifier("""This is great !""" , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
__a = text_classifier("""This is great !""" , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}]] )
__a = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
__a = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
] , )
@require_torch
def __a ( self : Optional[int] ):
'''simple docstring'''
import torch
__a = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@require_tf
def __a ( self : List[str] ):
'''simple docstring'''
__a = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@slow
@require_torch
def __a ( self : int ):
'''simple docstring'''
__a = pipeline("""text-classification""" )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
@slow
@require_tf
def __a ( self : Union[str, Any] ):
'''simple docstring'''
__a = pipeline("""text-classification""" , framework="""tf""" )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
def __a ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __a ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__a = """HuggingFace is in"""
__a = text_classifier(SCREAMING_SNAKE_CASE__ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
__a = ["""HuggingFace is in """, """Paris is in France"""]
__a = text_classifier(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}, {"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__a = text_classifier(SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ )
__a = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] * N, [{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] * N] , )
__a = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
__a = text_classifier(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
        # This might be used as a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__a = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
text_classifier(SCREAMING_SNAKE_CASE__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__a = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 201
| 0
|
def perfect_cube(n: int) -> bool:
    # Round the cube root before cubing: n ** (1 / 3) is a float, so a direct equality
    # check would fail for most perfect cubes (e.g. 27 ** (1 / 3) == 2.9999...).
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
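    # Expected output with the fixed function above: True for 27 (= 3 ** 3), False for 4.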
| 235
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowercase_ = """
Human: <<task>>
Assistant: """
lowercase_ = """huggingface-tools/default-prompts"""
lowercase_ = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def download_prompt(prompt_or_repo_id, agent_name, mode="run") -> str:
    """Return a prompt template from the Hub, or pass a literal prompt through unchanged."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('\\s', prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type='dataset', user_agent={'agent': agent_name})
    with open(prompt_file, 'r', encoding='utf-8') as f:
        return f.read()
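# Usage sketch (hypothetical agent name; downloads the template files from the default prompts dataset):
#   run_template = download_prompt(None, agent_name="MyAgent", mode="run")
#   chat_template = download_prompt(None, agent_name="MyAgent", mode="chat")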
| 235
| 1
|
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an in-memory audio payload to a mono float32 waveform by piping it through ffmpeg."""
    ar = f'{sampling_rate}'
    ac = """1"""
    format_for_conversion = """f32le"""
    ffmpeg_command = [
        """ffmpeg""",
        """-i""",
        """pipe:0""",
        """-ac""",
        ac,
        """-ar""",
        ar,
        """-f""",
        format_for_conversion,
        """-hide_banner""",
        """-loglevel""",
        """quiet""",
        """pipe:1""",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("""ffmpeg was not found but is required to load audio files from filename""") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("""Malformed soundfile""")
    return audio
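# Usage sketch (assumes ffmpeg is installed and `sample.mp3` is a real audio file):
#   with open("sample.mp3", "rb") as f:
#       waveform = ffmpeg_read(f.read(), sampling_rate=16_000)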
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] = "f32le" , ):
"""simple docstring"""
snake_case_ : int = f'{sampling_rate}'
snake_case_ : str = """1"""
if format_for_conversion == "s16le":
snake_case_ : Union[str, Any] = 2
elif format_for_conversion == "f32le":
snake_case_ : Dict = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
snake_case_ : Optional[int] = platform.system()
if system == "Linux":
snake_case_ : Dict = """alsa"""
snake_case_ : List[str] = """default"""
elif system == "Darwin":
snake_case_ : str = """avfoundation"""
snake_case_ : Tuple = """:0"""
elif system == "Windows":
snake_case_ : Union[str, Any] = """dshow"""
snake_case_ : Tuple = """default"""
snake_case_ : Dict = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
snake_case_ : Union[str, Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
snake_case_ : str = _ffmpeg_stream(lowerCAmelCase__ , lowerCAmelCase__ )
for item in iterator:
yield item
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Any = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
snake_case_ : Any = stream_chunk_s
else:
snake_case_ : Optional[Any] = chunk_length_s
snake_case_ : List[Any] = ffmpeg_microphone(lowerCAmelCase__ , lowerCAmelCase__ , format_for_conversion=lowerCAmelCase__ )
if format_for_conversion == "s16le":
snake_case_ : Optional[Any] = np.intaa
snake_case_ : Union[str, Any] = 2
elif format_for_conversion == "f32le":
snake_case_ : List[str] = np.floataa
snake_case_ : Any = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
snake_case_ : Dict = chunk_length_s / 6
snake_case_ : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCAmelCase__ , (int, float) ):
snake_case_ : List[str] = [stride_length_s, stride_length_s]
snake_case_ : int = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
snake_case_ : Dict = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
snake_case_ : int = datetime.datetime.now()
snake_case_ : Union[str, Any] = datetime.timedelta(seconds=lowerCAmelCase__ )
for item in chunk_bytes_iter(lowerCAmelCase__ , lowerCAmelCase__ , stride=(stride_left, stride_right) , stream=lowerCAmelCase__ ):
# Put everything back in numpy scale
snake_case_ : Tuple = np.frombuffer(item["""raw"""] , dtype=lowerCAmelCase__ )
snake_case_ : Any = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
snake_case_ : Optional[Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple = False ):
"""simple docstring"""
snake_case_ : Tuple = b""""""
snake_case_ , snake_case_ : Optional[int] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
snake_case_ : Optional[int] = 0
for raw in iterator:
acc += raw
if stream and len(lowerCAmelCase__ ) < chunk_len:
snake_case_ : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCAmelCase__ ) >= chunk_len:
# We are flushing the accumulator
snake_case_ : Dict = (_stride_left, stride_right)
snake_case_ : Optional[int] = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
snake_case_ : Optional[Any] = False
yield item
snake_case_ : List[Any] = stride_left
snake_case_ : int = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCAmelCase__ ) > stride_left:
snake_case_ : str = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
snake_case_ : Union[str, Any] = False
yield item
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : str = 2**2_4 # 16Mo
try:
with subprocess.Popen(lowerCAmelCase__ , stdout=subprocess.PIPE , bufsize=lowerCAmelCase__ ) as ffmpeg_process:
while True:
snake_case_ : Any = ffmpeg_process.stdout.read(lowerCAmelCase__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 714
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
a_ = '''>>zh<<'''
a_ = '''Helsinki-NLP/'''
if is_torch_available():
a_ = '''pt'''
elif is_tf_available():
a_ = '''tf'''
else:
a_ = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : str = MarianTokenizer
_A : List[str] = False
_A : List[str] = True
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Any = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase (self , **lowercase__ ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = """</s>"""
snake_case_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def __UpperCamelCase (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
snake_case_ : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : List[str] = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCamelCase (self ):
# fmt: off
snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
snake_case_ : Dict = """Tämä on testi"""
snake_case_ : List[Any] = """This is a test"""
snake_case_ : Optional[int] = [76, 7, 20_47, 2]
snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
snake_case_ : Any = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
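# Illustrative usage sketch (not part of the test class above): the separate source/target
# vocabularies exercised by the test are driven through `text_target`, exactly as in the test body.
# Running this requires downloading the small test checkpoint referenced above.
if __name__ == "__main__":
    from transformers import MarianTokenizer

    marian_tok = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
    source_ids = marian_tok("Tämä on testi").input_ids                 # encoded with the source vocabulary
    target_ids = marian_tok(text_target="This is a test").input_ids    # encoded with the target vocabulary
    print(source_ids, target_ids)  # per the test above: [76, 7, 2047, 2] and [69, 12, 11, 940, 2]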
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase : int = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase : List[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase : int = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
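# Illustrative note (not part of the helper above): the default attention mask simply marks non-pad
# positions, e.g. with pad_token_id=1 an input_ids row [71, 82, 2, 1, 1] produces the mask
# [1, 1, 1, 0, 0].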
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=False , snake_case=9_9 , snake_case=1_6 , snake_case=2 , snake_case=4 , snake_case=4 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=3_2 , snake_case=2 , snake_case=1 , snake_case=0 , snake_case=0.02 , ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Dict = is_training
UpperCAmelCase : int = use_labels
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : List[Any] = intermediate_size
UpperCAmelCase : Union[str, Any] = hidden_act
UpperCAmelCase : Optional[Any] = hidden_dropout_prob
UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Dict = eos_token_id
UpperCAmelCase : Any = pad_token_id
UpperCAmelCase : List[str] = bos_token_id
UpperCAmelCase : Any = initializer_range
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase : List[str] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase : Any = shift_tokens_right(snake_case , 1 , 2 )
UpperCAmelCase : Tuple = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=snake_case , )
UpperCAmelCase : List[str] = prepare_blenderbot_inputs_dict(snake_case , snake_case , snake_case )
return config, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : int = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = 2_0
UpperCAmelCase : List[str] = model_class_name(snake_case )
UpperCAmelCase : List[str] = model.encode(inputs_dict["input_ids"] )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , snake_case , snake_case )
UpperCAmelCase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
UpperCAmelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase : Tuple = model.decode(
decoder_input_ids[:, :-1] , snake_case , decoder_attention_mask=snake_case , past_key_values=snake_case , decoder_position_ids=snake_case , )
UpperCAmelCase : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , snake_case , decoder_attention_mask=snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case , )
UpperCAmelCase : List[Any] = model.decode(snake_case , snake_case )
UpperCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = 2_0
UpperCAmelCase : Optional[int] = model_class_name(snake_case )
UpperCAmelCase : Optional[int] = model.encode(inputs_dict["input_ids"] )
UpperCAmelCase , UpperCAmelCase : List[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCAmelCase : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , snake_case , snake_case )
UpperCAmelCase : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase : int = model.decode(
decoder_input_ids[:, :-1] , snake_case , decoder_attention_mask=snake_case , past_key_values=snake_case , decoder_position_ids=snake_case , )
UpperCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:] , snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case , decoder_position_ids=snake_case , )
UpperCAmelCase : Any = model.decode(snake_case , snake_case , decoder_attention_mask=snake_case )
UpperCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = 99
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase : List[Any] = input_ids.shape[0]
UpperCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self._get_config_and_data()
UpperCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(snake_case )
UpperCAmelCase : Optional[Any] = lm_model(input_ids=snake_case )
UpperCAmelCase : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
UpperCAmelCase : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(snake_case )
UpperCAmelCase : int = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
UpperCAmelCase : Dict = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase : List[str] = lm_model(input_ids=snake_case , decoder_input_ids=snake_case )
UpperCAmelCase : Optional[Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
UpperCAmelCase : Any = shift_tokens_right(snake_case , 1 , 2 )
UpperCAmelCase : str = np.equal(snake_case , 1 ).astype(np.floataa ).sum()
UpperCAmelCase : int = np.equal(snake_case , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(snake_case , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class UpperCamelCase__ ( lowercase__ , unittest.TestCase , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : str = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = FlaxBlenderbotModelTester(self )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(snake_case , snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(snake_case , snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase : int = self._prepare_for_class(snake_case , snake_case )
UpperCAmelCase : Tuple = model_class(snake_case )
@jax.jit
def encode_jitted(snake_case , snake_case=None , **snake_case ):
return model.encode(input_ids=snake_case , attention_mask=snake_case )
with self.subTest("JIT Enabled" ):
UpperCAmelCase : Optional[int] = encode_jitted(**snake_case ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase : Any = encode_jitted(**snake_case ).to_tuple()
self.assertEqual(len(snake_case ) , len(snake_case ) )
for jitted_output, output in zip(snake_case , snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase : str = model_class(snake_case )
UpperCAmelCase : List[Any] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
UpperCAmelCase : Dict = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(snake_case , snake_case , snake_case ):
return model.decode(
decoder_input_ids=snake_case , decoder_attention_mask=snake_case , encoder_outputs=snake_case , )
with self.subTest("JIT Enabled" ):
UpperCAmelCase : List[Any] = decode_jitted(**snake_case ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase : str = decode_jitted(**snake_case ).to_tuple()
self.assertEqual(len(snake_case ) , len(snake_case ) )
for jitted_output, output in zip(snake_case , snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase : Tuple = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase : List[Any] = model(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = {"num_beams": 1, "early_stopping": True, "min_length": 1_5, "max_length": 2_5}
UpperCAmelCase : Any = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
UpperCAmelCase : List[str] = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=snake_case )
UpperCAmelCase : str = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
UpperCAmelCase : Union[str, Any] = ["Sam"]
UpperCAmelCase : Optional[int] = tokenizer(snake_case , return_tensors="jax" )
UpperCAmelCase : str = model.generate(**snake_case , **snake_case )
UpperCAmelCase : Tuple = "Sam is a great name. It means \"sun\" in Gaelic."
UpperCAmelCase : List[Any] = tokenizer.batch_decode(snake_case , **snake_case )
assert generated_txt[0].strip() == tgt_text
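# Illustrative sketch (not part of the tests above; it assumes Flax is installed so that
# `shift_tokens_right` was imported): each row gains the decoder start token in front and drops its
# last position, which is why the pad count falls by one for every row that ended in padding.
if __name__ == "__main__":
    example_ids = np.array([[71, 82, 18, 33, 2, 1, 1]], dtype=np.int64)
    shifted_ids = shift_tokens_right(example_ids, 1, 2)  # pad_token_id=1, decoder_start_token_id=2
    print(np.asarray(shifted_ids))  # [[ 2 71 82 18 33  2  1]]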
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
a : str = "src/transformers"
# Matches is_xxx_available()
a : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
a : int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a : Any = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
a : Dict = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
a : Any = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a : List[str] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
a : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
a : List[str] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
a : Any = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
a : Union[str, Any] = re.compile(R"^\s*try:")
# Catches a line with else:
a : Tuple = re.compile(R"^\s*else:")
def lowercase ( __magic_name__ ):
'''simple docstring'''
if _re_test_backend.search(__magic_name__ ) is None:
return None
UpperCAmelCase : Optional[int] = [b[0] for b in _re_backend.findall(__magic_name__ )]
backends.sort()
return "_and_".join(__magic_name__ )
def lowercase ( __magic_name__ ):
'''simple docstring'''
with open(__magic_name__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase : str = f.readlines()
UpperCAmelCase : Optional[int] = 0
while line_index < len(__magic_name__ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__magic_name__ ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase : str = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__magic_name__ ):
UpperCAmelCase : int = _re_one_line_import_struct.search(__magic_name__ ).groups()[0]
UpperCAmelCase : Any = re.findall("\[([^\]]+)\]" , __magic_name__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
UpperCAmelCase : Optional[int] = _re_import_struct_key_value.search(__magic_name__ )
if single_line_import_search is not None:
UpperCAmelCase : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__magic_name__ ) > 0]
objects.extend(__magic_name__ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase : Dict = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase : List[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
UpperCAmelCase : List[str] = lines[line_index]
if _re_import_struct_add_one.search(__magic_name__ ) is not None:
objects.append(_re_import_struct_add_one.search(__magic_name__ ).groups()[0] )
elif _re_import_struct_add_many.search(__magic_name__ ) is not None:
UpperCAmelCase : List[str] = _re_import_struct_add_many.search(__magic_name__ ).groups()[0].split(", " )
UpperCAmelCase : int = [obj[1:-1] for obj in imports if len(__magic_name__ ) > 0]
objects.extend(__magic_name__ )
elif _re_between_brackets.search(__magic_name__ ) is not None:
UpperCAmelCase : Optional[Any] = _re_between_brackets.search(__magic_name__ ).groups()[0].split(", " )
UpperCAmelCase : Optional[int] = [obj[1:-1] for obj in imports if len(__magic_name__ ) > 0]
objects.extend(__magic_name__ )
elif _re_quote_object.search(__magic_name__ ) is not None:
objects.append(_re_quote_object.search(__magic_name__ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase : Optional[int] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase : List[str] = []
while (
line_index < len(__magic_name__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
UpperCAmelCase : int = lines[line_index]
UpperCAmelCase : Tuple = _re_import.search(__magic_name__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase : Optional[Any] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(__magic_name__ ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
UpperCAmelCase : str = lines[line_index]
UpperCAmelCase : Tuple = _re_import.search(__magic_name__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase : Dict = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
def find_duplicates(__magic_name__ ):
return [k for k, v in collections.Counter(__magic_name__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase : Tuple = []
for key in import_dict_objects.keys():
UpperCAmelCase : List[str] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
UpperCAmelCase : Any = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase : List[Any] = "base imports" if key == "none" else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : int = []
for root, _, files in os.walk(__magic_name__ ):
if "__init__.py" in files:
UpperCAmelCase : Dict = os.path.join(__magic_name__ , "__init__.py" )
UpperCAmelCase : Optional[Any] = parse_init(__magic_name__ )
if objects is not None:
UpperCAmelCase : int = analyze_results(*__magic_name__ )
if len(__magic_name__ ) > 0:
UpperCAmelCase : Union[str, Any] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("\n".join(__magic_name__ ) )
if len(__magic_name__ ) > 0:
raise ValueError("\n\n".join(__magic_name__ ) )
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = []
for path, directories, files in os.walk(__magic_name__ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(__magic_name__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__magic_name__ ) / folder).glob("*.py" ) ) ) == 0:
continue
UpperCAmelCase : Any = str((Path(__magic_name__ ) / folder).relative_to(__magic_name__ ) )
UpperCAmelCase : Optional[Any] = short_path.replace(os.path.sep , "." )
submodules.append(__magic_name__ )
for fname in files:
if fname == "__init__.py":
continue
UpperCAmelCase : List[str] = str((Path(__magic_name__ ) / fname).relative_to(__magic_name__ ) )
UpperCAmelCase : str = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(__magic_name__ )
return submodules
a : str = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = importlib.util.spec_from_file_location(
"transformers" , os.path.join(__magic_name__ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
UpperCAmelCase : Optional[int] = spec.loader.load_module()
UpperCAmelCase : Dict = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__magic_name__ ) > 0:
UpperCAmelCase : List[str] = "\n".join(F"- {module}" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F"{list_of_modules}\n"
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
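# Illustrative sketch (not part of the script above; "foo"/"FooConfig"/"FooModel" are made-up
# names): the init layout the parser expects. A lazy `_import_structure` dict is mirrored by a
# TYPE_CHECKING block, and backend-specific entries sit in try/except/else blocks gated by
# `if not is_xxx_available():`, for example:
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         _import_structure["modeling_foo"] = ["FooModel"]
#
#     if TYPE_CHECKING:
#         from .configuration_foo import FooConfig
#
# The checks above fail when an object appears in only one of the two halves.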
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = False, False, False
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[int] = None
__lowercase: bool = True
__lowercase: bool = True
__lowercase: Optional[str] = None
# Automatically constructed
__lowercase: ClassVar[str] = "dict"
__lowercase: ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()})
__lowercase: str = field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE)
def __call__( self : str ) ->Optional[int]:
"""simple docstring"""
return self.pa_type
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : int ) ->int:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install \'soundfile\'.""" ) from err
if isinstance(_a , _a ):
return {"bytes": None, "path": value}
elif isinstance(_a , _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
snake_case_ = BytesIO()
sf.write(_a , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
                    # Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError("""To use PCM files, please specify a \'sampling_rate\' in Audio object""" )
if value.get("""bytes""" ):
                    # If we already have PCM bytes, use them directly instead of re-reading the file
snake_case_ = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
snake_case_ = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
snake_case_ = BytesIO(bytes() )
sf.write(_a , _a , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F"""An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.""" )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any = None ) ->Dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
snake_case_ , snake_case_ = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(F"""An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install \'librosa\' and \'soundfile\'.""" ) from err
snake_case_ = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
snake_case_ = token_per_repo_id or {}
snake_case_ = path.split("""::""" )[-1]
try:
snake_case_ = string_to_dict(_a , config.HUB_DATASETS_URL )["""repo_id"""]
snake_case_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
snake_case_ = None
with xopen(_a , """rb""" , use_auth_token=_a ) as f:
snake_case_ , snake_case_ = sf.read(_a )
else:
snake_case_ , snake_case_ = sf.read(_a )
snake_case_ = array.T
if self.mono:
snake_case_ = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
snake_case_ = librosa.resample(_a , orig_sr=_a , target_sr=self.sampling_rate )
snake_case_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->int:
"""simple docstring"""
if pa.types.is_string(storage.type ):
snake_case_ = pa.array([None] * len(_a ) , type=pa.binary() )
snake_case_ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
snake_case_ = pa.array([None] * len(_a ) , type=pa.string() )
snake_case_ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
snake_case_ = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
snake_case_ = storage.field("""bytes""" )
else:
snake_case_ = pa.array([None] * len(_a ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
snake_case_ = storage.field("""path""" )
else:
snake_case_ = pa.array([None] * len(_a ) , type=pa.string() )
snake_case_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(_a , self.pa_type )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Tuple ) ->Any:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(UpperCAmelCase_ : str ):
with xopen(_a , """rb""" ) as f:
snake_case_ = f.read()
return bytes_
snake_case_ = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
snake_case_ = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
snake_case_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
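# Illustrative usage sketch (not part of the feature class above): the decode path implemented by
# `decode_example` is normally reached by casting a dataset column to Audio. "sample.wav" is a
# placeholder path and 16_000 an arbitrary target sampling rate.
if __name__ == "__main__":
    from datasets import Audio, Dataset

    ds = Dataset.from_dict({"audio": ["sample.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
    decoded = ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}
    print(decoded["array"].shape, decoded["sampling_rate"])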
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = 'https://openaipublic.azureedge.net/jukebox/models/'
__SCREAMING_SNAKE_CASE : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
snake_case_ = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
snake_case_ = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
snake_case_ = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
snake_case_ = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
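# Illustrative sketch (not part of the conversion script; the keys are hypothetical but have the
# shapes the helper above matches):
#   "prime_state_ln.weight" -> "encoder.final_layer_norm.weight"
#   "y_emb.weight"          -> "metadata_embedding.weight"
# while ".model.1"/".model.3" conv suffixes deep inside VQ-VAE blocks become ".conv1d_1"/".conv1d_2".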
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = {}
import re
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_conv_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_encoder_block_conv_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_encoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_proj_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
snake_case_ = re_encoder_block_proj_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_decoder_block_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_decoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
snake_case_ = re_decoder_block_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_prior_cond_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_prior_cond_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
snake_case_ = re_prior_cond_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# keep original key
else:
snake_case_ = original_key
snake_case_ = replace_key(_SCREAMING_SNAKE_CASE )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
snake_case_ = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
snake_case_ = original_key
snake_case_ = original_key
snake_case_ = value
return new_dict
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
snake_case_ = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_SCREAMING_SNAKE_CASE )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_SCREAMING_SNAKE_CASE )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , """wb""" ).write(r.content )
snake_case_ = MODEL_MAPPING[model_name.split("""/""" )[-1]]
snake_case_ = JukeboxConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = JukeboxModel(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = {}
for i, dict_name in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["""model"""]
snake_case_ = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
snake_case_ = old_dic[k]
elif k.endswith(""".w""" ):
snake_case_ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
snake_case_ = old_dic[k]
else:
snake_case_ = old_dic[k]
snake_case_ = """vqvae""" if i == 0 else f"""priors.{3 - i}"""
snake_case_ = fix_jukebox_keys(_SCREAMING_SNAKE_CASE , model.state_dict() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
weight_dict.append(_SCREAMING_SNAKE_CASE )
snake_case_ = weight_dict.pop(0 )
model.vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , """w""" ) as txtfile:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
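# Illustrative invocation (not part of the script; the file name is a placeholder):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path jukebox-1b-lyrics-converted
# Both flags correspond to the argparse arguments above, and "jukebox-1b-lyrics" is one of the
# MODEL_MAPPING keys.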
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a = HfApi()
a = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
a = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
a = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
a = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = list(__magic_name__ )
_lowerCAmelCase :Dict = list(__magic_name__ )
_lowerCAmelCase :Any = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count += 1
_lowerCAmelCase :Union[str, Any] = '_'
if count > 1:
return False
else:
return "".join(__magic_name__ )
def UpperCamelCase_( __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :int = []
while True:
_lowerCAmelCase :str = ['$'] * len(__magic_name__ )
_lowerCAmelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
for j in range(i + 1 , len(__magic_name__ ) ):
_lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCAmelCase :str = '*'
_lowerCAmelCase :Union[str, Any] = '*'
temp.append('X' )
for i in range(len(__magic_name__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__magic_name__ ) == 0:
return pi
_lowerCAmelCase :Any = list(set(__magic_name__ ) )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ):
"""simple docstring"""
_lowerCAmelCase :str = []
for minterm in minterms:
_lowerCAmelCase :Any = ''
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(__magic_name__ )
return temp
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = list(__magic_name__ )
_lowerCAmelCase :List[Any] = list(__magic_name__ )
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
for i in range(len(chart[0] ) ):
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Optional[Any] = -1
for j in range(len(__magic_name__ ) ):
if chart[j][i] == 1:
count += 1
_lowerCAmelCase :List[Any] = j
if count == 1:
_lowerCAmelCase :Dict = 1
for i in range(len(__magic_name__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__magic_name__ ) ):
_lowerCAmelCase :Dict = 0
temp.append(prime_implicants[i] )
while True:
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Any = -1
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = chart[i].count(1 )
if count_n > max_n:
_lowerCAmelCase :Optional[Any] = count_n
_lowerCAmelCase :Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = 0
def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
for j in range(len(__magic_name__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
_lowerCAmelCase :str = 1
return chart
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
_lowerCAmelCase :Tuple = [
float(__magic_name__ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = check(__magic_name__ )
print('Prime Implicants are:' )
print(__magic_name__ )
_lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
print('Essential Prime Implicants are:' )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
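# Illustrative worked note (not part of the implementation above): in the string-merging step, two
# minterm strings that differ in exactly one position combine into a single implicant with '_'
# marking the eliminated variable, e.g. "001" and "011" merge into "0_1", while "001" and "110"
# differ in three positions and therefore do not merge.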
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_lowerCamelCase : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCamelCase__ ( datasets.BuilderConfig ):
__UpperCAmelCase = 10_000
__UpperCAmelCase = None
__UpperCAmelCase = None
class lowerCamelCase__ ( datasets.ArrowBasedBuilder ):
__UpperCAmelCase = ParquetConfig
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _UpperCamelCase ( self , lowerCAmelCase__ ) -> Dict:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_UpperCamelCase :Dict =dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
_UpperCamelCase :Dict =data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase :Union[str, Any] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_UpperCamelCase :Optional[Any] =[dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_UpperCamelCase :Optional[int] =[]
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase :Union[str, Any] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_UpperCamelCase :int =[dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(lowerCAmelCase__ ):
with open(lowerCAmelCase__ , """rb""" ) as f:
_UpperCamelCase :List[Any] =datasets.Features.from_arrow_schema(pq.read_schema(lowerCAmelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"""files""": files} ) )
return splits
    def _cast_table(self , pa_table ) -> pa.Table:
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables(self , files ):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , """rb""" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                    raise
| 512
|
'''simple docstring'''
def jaro_winkler(stra: str , strb: str ) -> float:
    '''Compute the Jaro-Winkler similarity of two strings.'''

    def get_matched_characters(_stra: str , _strb: str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = F'''{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'''
        return "".join(matched )

    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
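# Rough sanity check (a sketch, not part of the original file): with this formula
# jaro_winkler("martha", "marhta") should come out close to 0.96, while the
# jaro_winkler("hello", "world") printed below is noticeably lower.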
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 512
| 1
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _snake_case :
@staticmethod
        def SCREAMING_SNAKE_CASE__ ( *args , **kwargs) -> Dict:
pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _snake_case ( unittest.TestCase ):
_lowercase : Optional[int] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def SCREAMING_SNAKE_CASE__ ( self , model , tokenizer , processor) -> Any:
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def SCREAMING_SNAKE_CASE__ ( self , depth_estimator , examples) -> List[str]:
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png')
        self.assertEqual({'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)} , outputs)
import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test')
        outputs = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
])
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
            ] , outputs , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation' , model=model_id)
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
        outputs['depth'] = hashimage(outputs['depth'])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item()) , 29.3_04)
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item()) , 2.6_62)
@require_torch
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT')
| 73
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str ) -> None:
    """Print the single-character entropy, the two-character entropy and their difference."""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""" )
    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text(text: str ) -> tuple[dict, dict]:
    """Count single characters and overlapping two-character sequences in the text."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
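# Small illustration (a sketch, not in the original file): analyze_text("ab ab")
# counts {'a': 2, 'b': 2, ' ': 1} for single characters, plus overlapping bigrams
# such as "ab", "b " and " a" in the two-character counter.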
def main() -> None:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 469
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,) -> Dict:
'''simple docstring'''
if config_name_or_path is None:
__UpperCamelCase = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
__UpperCamelCase = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
__UpperCamelCase = question_encoder_name_or_path
__UpperCamelCase = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
__UpperCamelCase = RagConfig.from_pretrained(_snake_case )
__UpperCamelCase = AutoConfig.from_pretrained(_snake_case )
__UpperCamelCase = AutoConfig.from_pretrained(_snake_case )
__UpperCamelCase = gen_config
__UpperCamelCase = question_encoder_config
__UpperCamelCase = model_class.from_pretrained_question_encoder_generator(
_snake_case ,_snake_case ,config=_snake_case )
rag_model.save_pretrained(_snake_case )
# Sanity check.
model_class.from_pretrained(_snake_case )
# Save tokenizers.
__UpperCamelCase = AutoTokenizer.from_pretrained(_snake_case )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
__UpperCamelCase = AutoTokenizer.from_pretrained(_snake_case )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
_A = parser.parse_args()
_A = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 228
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {"vocab_file": "spiece.model"}
_A = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
_A = {"bert_for_seq_generation": 512}
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Tuple = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[int] = []
_snake_case : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self : Tuple , A_ : List[str] , A_ : List[str]="<s>" , A_ : Optional[int]="</s>" , A_ : Dict="<unk>" , A_ : Optional[int]="<pad>" , A_ : int="<::::>" , A_ : Optional[Dict[str, Any]] = None , **A_ : Optional[int] , )-> None:
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sep_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
__UpperCamelCase = vocab_file
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def A ( self : Optional[Any] )-> List[str]:
return self.sp_model.get_piece_size()
def A ( self : List[Any] )-> int:
__UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] )-> Dict:
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self : str , A_ : Optional[Any] )-> List[Any]:
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self : Optional[Any] , A_ : str )-> List[str]:
        return self.sp_model.encode(A_ , out_type=str )
def A ( self : List[str] , A_ : Union[str, Any] )-> str:
return self.sp_model.piece_to_id(A_ )
def A ( self : List[Any] , A_ : Dict )-> Optional[Any]:
__UpperCamelCase = self.sp_model.IdToPiece(A_ )
return token
def A ( self : List[Any] , A_ : Any )-> Union[str, Any]:
__UpperCamelCase = []
__UpperCamelCase = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def A ( self : int , A_ : str , A_ : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCamelCase = os.path.join(
A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , "wb" ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 228
| 1
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(lowercase)
class __SCREAMING_SNAKE_CASE ( lowercase):
def __init__( self : List[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Any ):
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
requires_backends(self , "decord" )
self.check_model_type(__UpperCamelCase )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Dict=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Dict=None ):
_UpperCAmelCase = {}
if frame_sampling_rate is not None:
_UpperCAmelCase = frame_sampling_rate
if num_frames is not None:
_UpperCAmelCase = num_frames
_UpperCAmelCase = {}
if top_k is not None:
_UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] , __UpperCamelCase : Union[str, List[str]] , **__UpperCamelCase : List[str] ):
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Optional[Any]=1 ):
if num_frames is None:
_UpperCAmelCase = self.model.config.num_frames
if video.startswith("http://" ) or video.startswith("https://" ):
_UpperCAmelCase = BytesIO(requests.get(__UpperCamelCase ).content )
_UpperCAmelCase = VideoReader(__UpperCamelCase )
videoreader.seek(0 )
_UpperCAmelCase = 0
_UpperCAmelCase = num_frames * frame_sampling_rate - 1
        _UpperCAmelCase = np.linspace(__UpperCamelCase , __UpperCamelCase , num=__UpperCamelCase , dtype=np.int64 )
_UpperCAmelCase = videoreader.get_batch(__UpperCamelCase ).asnumpy()
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = self.image_processor(__UpperCamelCase , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = self.model(**__UpperCamelCase )
return model_outputs
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
_UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
_UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
_UpperCAmelCase , _UpperCAmelCase = probs.topk(__UpperCamelCase )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__UpperCamelCase , __UpperCamelCase )]
| 684
|
def is_balanced(s ) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"} )
    closed_brackets = set({")", "]", "}"} )
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
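# Quick check (illustrative, matching the logic above):
# is_balanced("([]{})") -> True, is_balanced("([)]") -> False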
def main() -> None:
    s = input("Enter sequence of brackets: " )
    if is_balanced(s ):
        print(s , "is balanced" )
    else:
        print(s , "is not balanced" )
if __name__ == "__main__":
main()
| 684
| 1
|
'''simple docstring'''
import math
def is_prime(number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime(value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
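# Illustrative calls (a sketch, not part of the original file):
# next_prime(14) -> 17, while next_prime(14, desc=True) -> 13 (it searches downwards).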
| 56
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,_lowerCamelCase ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 56
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__A : str = TypeVar('T')
__A : Optional[int] = TypeVar('U')
class _SCREAMING_SNAKE_CASE ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : T | None , __lowerCamelCase : U | None ):
SCREAMING_SNAKE_CASE = key
SCREAMING_SNAKE_CASE = val
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def __repr__( self : int ):
return (
f"Node: key: {self.key}, val: {self.val}, "
f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class _SCREAMING_SNAKE_CASE ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Tuple ):
SCREAMING_SNAKE_CASE = DoubleLinkedListNode(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = DoubleLinkedListNode(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.rear, self.head
def __repr__( self : Optional[int] ):
SCREAMING_SNAKE_CASE = ["DoubleLinkedList"]
SCREAMING_SNAKE_CASE = self.head
while node.next is not None:
rep.append(str(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE = node.next
rep.append(str(self.rear ) )
return ",\n ".join(UpperCamelCase__ )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : DoubleLinkedListNode[T, U] ):
SCREAMING_SNAKE_CASE = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
SCREAMING_SNAKE_CASE = node
SCREAMING_SNAKE_CASE = previous
SCREAMING_SNAKE_CASE = node
SCREAMING_SNAKE_CASE = self.rear
def _snake_case ( self : str , __lowerCamelCase : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
SCREAMING_SNAKE_CASE = node.next
SCREAMING_SNAKE_CASE = node.prev
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
return node
class _SCREAMING_SNAKE_CASE ( Generic[T, U] ):
'''simple docstring'''
lowerCamelCase__ = {}
def __init__( self : Optional[Any] , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = DoubleLinkedList()
SCREAMING_SNAKE_CASE = capacity
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = {}
def __repr__( self : List[str] ):
return (
f"CacheInfo(hits={self.hits}, misses={self.miss}, "
f"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__( self : str , __lowerCamelCase : T ):
return key in self.cache
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : T ):
if key in self.cache:
self.hits += 1
SCREAMING_SNAKE_CASE = self.cache[key]
SCREAMING_SNAKE_CASE = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(UpperCamelCase__ )
return node.val
self.miss += 1
return None
def _snake_case ( self : Optional[Any] , __lowerCamelCase : T , __lowerCamelCase : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
SCREAMING_SNAKE_CASE = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(UpperCamelCase__ ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
SCREAMING_SNAKE_CASE = DoubleLinkedListNode(UpperCamelCase__ , UpperCamelCase__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
SCREAMING_SNAKE_CASE = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
SCREAMING_SNAKE_CASE = value
self.list.add(UpperCamelCase__ )
@classmethod
def _snake_case ( cls : List[Any] , __lowerCamelCase : int = 128 ):
def cache_decorator_inner(__lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*__lowerCamelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
SCREAMING_SNAKE_CASE = LRUCache(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
SCREAMING_SNAKE_CASE = func(*UpperCamelCase__ )
cls.decorator_function_to_instance_map[func].put(args[0] , UpperCamelCase__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(UpperCamelCase__ , "cache_info" , UpperCamelCase__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16
|
import math
def a__ ( A_ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( A_ = 0.1 ):
'''simple docstring'''
__magic_name__ = 3
__magic_name__ = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ):
primes += is_prime(A_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 529
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "bridgetower_vision_model"
def __init__( self : Dict , __A : Optional[int]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=3 , __A : Optional[Any]=1_6 , __A : Any=2_8_8 , __A : str=1 , __A : Any=1e-0_5 , __A : Optional[int]=False , __A : Optional[Any]=True , __A : List[str]=False , **__A : Union[str, Any] , ):
super().__init__(**__A )
snake_case__ : Any = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : Any = num_channels
snake_case__ : str = patch_size
snake_case__ : Dict = image_size
snake_case__ : Union[str, Any] = initializer_factor
snake_case__ : Dict = layer_norm_eps
snake_case__ : Tuple = stop_gradient
snake_case__ : Any = share_layernorm
snake_case__ : Tuple = remove_last_layer
@classmethod
def _lowercase ( cls : Dict , __A : Union[str, os.PathLike] , **__A : Tuple ):
snake_case__ : Tuple = cls.get_config_dict(__A , **__A )
if config_dict.get("model_type" ) == "bridgetower":
snake_case__ : List[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "bridgetower_text_model"
def __init__( self : Optional[int] , __A : str=5_0_2_6_5 , __A : List[Any]=7_6_8 , __A : int=1_2 , __A : Optional[Any]=1_2 , __A : str=1 , __A : Dict=3_0_7_2 , __A : Tuple="gelu" , __A : Optional[Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_4 , __A : List[str]=1 , __A : List[Any]=1e-0_5 , __A : int=1 , __A : str=0 , __A : str=2 , __A : Union[str, Any]="absolute" , __A : Optional[Any]=True , **__A : Optional[Any] , ):
super().__init__(**__A )
snake_case__ : List[str] = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : Dict = num_attention_heads
snake_case__ : Tuple = hidden_act
snake_case__ : Any = initializer_factor
snake_case__ : List[Any] = intermediate_size
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : Any = max_position_embeddings
snake_case__ : Union[str, Any] = type_vocab_size
snake_case__ : Optional[int] = layer_norm_eps
snake_case__ : Optional[Any] = position_embedding_type
snake_case__ : Union[str, Any] = use_cache
snake_case__ : List[str] = pad_token_id
snake_case__ : Tuple = bos_token_id
snake_case__ : Dict = eos_token_id
@classmethod
def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Union[str, Any] ):
snake_case__ : Any = cls.get_config_dict(__A , **__A )
if config_dict.get("model_type" ) == "bridgetower":
snake_case__ : Optional[int] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "bridgetower"
def __init__( self : Optional[int] , __A : Dict=True , __A : Union[str, Any]="gelu" , __A : Tuple=7_6_8 , __A : Dict=1 , __A : Optional[Any]=1e-0_5 , __A : Optional[int]=False , __A : int="add" , __A : List[Any]=1_2 , __A : Any=6 , __A : List[str]=False , __A : int=False , __A : List[Any]=None , __A : Union[str, Any]=None , **__A : str , ):
# TODO: remove this once the Hub files are updated.
snake_case__ : Optional[int] = kwargs.pop("text_config_dict" , __A )
snake_case__ : Tuple = kwargs.pop("vision_config_dict" , __A )
super().__init__(**__A )
snake_case__ : Dict = share_cross_modal_transformer_layers
snake_case__ : Optional[int] = hidden_act
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Any = initializer_factor
snake_case__ : Any = layer_norm_eps
snake_case__ : Optional[Any] = share_link_tower_layers
snake_case__ : Any = link_tower_type
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = num_hidden_layers
snake_case__ : Tuple = tie_word_embeddings
snake_case__ : Any = init_layernorm_from_vision_encoder
if text_config is None:
snake_case__ : Optional[Any] = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
snake_case__ : int = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
snake_case__ : Optional[int] = BridgeTowerTextConfig(**__A )
snake_case__ : Optional[Any] = BridgeTowerVisionConfig(**__A )
@classmethod
def _lowercase ( cls : Any , __A : BridgeTowerTextConfig , __A : BridgeTowerVisionConfig , **__A : List[str] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )
def _lowercase ( self : Tuple ):
snake_case__ : Tuple = copy.deepcopy(self.__dict__ )
snake_case__ : str = self.text_config.to_dict()
snake_case__ : Dict = self.vision_config.to_dict()
snake_case__ : str = self.__class__.model_type
return output
| 717
|
def gnome_sort(lst: list ) -> list:
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
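# Example (illustrative): gnome_sort([3, 1, 2]) -> [1, 2, 3]; already-sorted and
# empty lists are returned unchanged.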
if __name__ == "__main__":
__lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 25
| 0
|
def binary_count_setbits(a: int ) -> int:
    if a < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(a , float ):
        raise TypeError("""Input value must be a \'int\' type""" )
    return bin(a ).count("""1""" )
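# Example (illustrative): binary_count_setbits(25) -> 3, since bin(25) == '0b11001'.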
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336
|
__A : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    """Evaluate a fully parenthesised arithmetic expression with an operand and an operator stack."""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
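# Illustrative check: for the equation defined below, "(5 + ((4 * 2) * (2 + 3)))",
# the two-stack evaluation yields 45, matching the comment next to it.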
if __name__ == "__main__":
__A : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 27
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Tuple = logging.get_logger(__name__)
UpperCamelCase_ : Dict = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = "gpt_neox"
def __init__( self : Dict , _snake_case : List[Any]=50_432 , _snake_case : Optional[Any]=6_144 , _snake_case : Dict=44 , _snake_case : Any=64 , _snake_case : Any=24_576 , _snake_case : str="gelu" , _snake_case : List[Any]=0.2_5 , _snake_case : Union[str, Any]=10_000 , _snake_case : int=0.0 , _snake_case : int=0.0 , _snake_case : Any=0.1 , _snake_case : str=2_048 , _snake_case : Any=0.0_2 , _snake_case : Optional[Any]=1e-5 , _snake_case : List[str]=True , _snake_case : Union[str, Any]=0 , _snake_case : Optional[Any]=2 , _snake_case : Dict=False , _snake_case : Union[str, Any]=True , _snake_case : List[Any]=None , **_snake_case : Any , ) -> str:
"""simple docstring"""
super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = rotary_pct
A_ = rotary_emb_base
A_ = attention_dropout
A_ = hidden_dropout
A_ = classifier_dropout
A_ = initializer_range
A_ = layer_norm_eps
A_ = use_cache
A_ = tie_word_embeddings
A_ = use_parallel_residual
A_ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def lowerCamelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
F'got {self.rope_scaling}' )
A_ = self.rope_scaling.get("type" , _snake_case )
A_ = self.rope_scaling.get("factor" , _snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(_snake_case , _snake_case ) or rope_scaling_factor <= 1.0:
raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 482
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCamelCase_ : str = '''us-east-1''' # defaults region
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
snake_case = 42
snake_case = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
snake_case = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_00,
"save_steps": 55_00,
}
snake_case = {**hyperparameters, "max_steps": 10_00}
@property
def lowerCamelCase__ ( self : int ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCamelCase__ ( self : Any ) -> str:
"""simple docstring"""
        return F'{self.framework}-transformers-test'
@property
def lowerCamelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
return F'./tests/sagemaker/scripts/{self.framework}'
@property
def lowerCamelCase__ ( self : Any ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def A_ (request ):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 482
| 1
|
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int] , target: int )-> list[int]:
    """Two-pointer scan over a sorted list for a pair of indices whose values sum to `target`."""
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
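# Illustrative calls: two_pointer([2, 7, 11, 15], 9) -> [0, 1] (as printed below),
# and two_pointer([2, 7, 11, 15], 100) -> [] when no pair matches.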
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 138
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def a ( self ):
torch.manual_seed(0 )
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def a ( self ):
_UpperCamelCase = self.dummy_uncond_unet
_UpperCamelCase = ScoreSdeVeScheduler()
_UpperCamelCase = ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
sde_ve.to(A_ )
sde_ve.set_progress_bar_config(disable=A_ )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A_ ).images
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A_ , return_dict=A_ )[
0
]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def a ( self ):
_UpperCamelCase = "google/ncsnpp-church-256"
_UpperCamelCase = UNetaDModel.from_pretrained(A_ )
_UpperCamelCase = ScoreSdeVeScheduler.from_pretrained(A_ )
_UpperCamelCase = ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
sde_ve.to(A_ )
sde_ve.set_progress_bar_config(disable=A_ )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=A_ ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 138
| 1
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A_ : Dict =logging.getLogger(__name__)
@dataclass
class __UpperCAmelCase ( __a ):
__A : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
__A : bool = field(default=__a , metadata={'help': 'Whether to SortishSamler or not.'} )
__A : bool = field(
default=__a , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
__A : bool = field(default=__a , metadata={'help': 'whether to use adafactor'} )
__A : Optional[float] = field(
default=__a , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
__A : Optional[float] = field(
default=__a , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
__A : Optional[float] = field(default=__a , metadata={'help': 'Dropout probability. Goes into model.config.'} )
__A : Optional[float] = field(
default=__a , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
__A : Optional[str] = field(
default='linear' , metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 716
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Tuple =[
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def snake_case_ ( __snake_case : List[Any] , __snake_case : List[Any]=None , __snake_case : Dict=None , __snake_case : Dict=None) -> Dict:
lowerCAmelCase_ = True
while ask_again:
lowerCAmelCase_ = input(__snake_case)
try:
if default is not None and len(__snake_case) == 0:
return default
return convert_value(__snake_case) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__snake_case)
def snake_case_ ( __snake_case : Union[str, Any] , __snake_case : int=[] , __snake_case : Any=None , __snake_case : List[str]=0) -> str:
lowerCAmelCase_ = BulletMenu(__snake_case , __snake_case)
lowerCAmelCase_ = menu.run(default_choice=__snake_case)
return convert_value(__snake_case) if convert_value is not None else result
def snake_case_ ( __snake_case : Tuple) -> Any:
lowerCAmelCase_ = int(__snake_case)
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value])
def snake_case_ ( __snake_case : List[str]) -> Union[str, Any]:
lowerCAmelCase_ = int(__snake_case)
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value])
def snake_case_ ( __snake_case : Tuple) -> int:
lowerCAmelCase_ = int(__snake_case)
return DynamoBackend(DYNAMO_BACKENDS[value]).value
def snake_case_ ( __snake_case : Optional[int]) -> str:
lowerCAmelCase_ = int(__snake_case)
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value])
def snake_case_ ( __snake_case : int) -> Optional[Any]:
lowerCAmelCase_ = int(__snake_case)
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value])
def snake_case_ ( __snake_case : List[str]) -> Optional[Any]:
return {"yes": True, "no": False}[value.lower()]
class __UpperCAmelCase ( argparse.RawDescriptionHelpFormatter ):
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = super()._format_usage(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 606
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ) -> None:
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
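# Typical invocation (illustrative; the checkpoint paths are placeholders):
#   python this_script.py --tf_checkpoint_path ./model.ckpt \
#       --bert_config_file ./bert_config.json --pytorch_dump_path ./pytorch_model.bin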
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 346
|
_A = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
lowerCAmelCase_ = Stack()
lowerCAmelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_A ) )
elif i in operators:
# RULE 2
operator_stack.push(_A )
elif i == ")":
# RULE 4
lowerCAmelCase_ = operator_stack.peek()
operator_stack.pop()
lowerCAmelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase_ = operators[opr](_A , _A )
operand_stack.push(_A )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_A = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 431
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def A ( lowercase__ : Dict ) -> Any:
if "cls_token" in name:
UpperCamelCase__ :str = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
UpperCamelCase__ :List[str] = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
UpperCamelCase__ :Union[str, Any] = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase__ :Dict = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
UpperCamelCase__ :Union[str, Any] = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
UpperCamelCase__ :Tuple = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
UpperCamelCase__ :Any = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
UpperCamelCase__ :Dict = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
UpperCamelCase__ :Tuple = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
UpperCamelCase__ :Optional[Any] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
UpperCamelCase__ :Optional[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
UpperCamelCase__ :Any = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
UpperCamelCase__ :Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCamelCase__ :Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
UpperCamelCase__ :Optional[Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
UpperCamelCase__ :List[Any] = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
UpperCamelCase__ :int = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase__ :Any = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase__ :Optional[Any] = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def A ( lowercase__ : List[str] , lowercase__ : Optional[int] ) -> List[Any]:
for key in orig_state_dict.copy().keys():
UpperCamelCase__ :Any = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
UpperCamelCase__ :str = key.split(""".""" )
UpperCamelCase__ :List[str] = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase__ :Dict = config.decoder_hidden_size
UpperCamelCase__ :List[str] = """decoder.decoder_layers."""
if "weight" in key:
UpperCamelCase__ :Tuple = val[:dim, :]
UpperCamelCase__ :Optional[Any] = val[dim : dim * 2, :]
UpperCamelCase__ :int = val[-dim:, :]
elif "bias" in key:
UpperCamelCase__ :List[str] = val[:dim]
UpperCamelCase__ :List[str] = val[dim : dim * 2]
UpperCamelCase__ :int = val[-dim:]
else:
UpperCamelCase__ :str = config.hidden_size
UpperCamelCase__ :List[Any] = """vit.encoder.layer."""
if "weight" in key:
UpperCamelCase__ :Tuple = val[:dim, :]
UpperCamelCase__ :int = val[dim : dim * 2, :]
UpperCamelCase__ :List[Any] = val[-dim:, :]
elif "bias" in key:
UpperCamelCase__ :Optional[Any] = val[:dim]
UpperCamelCase__ :Union[str, Any] = val[dim : dim * 2]
UpperCamelCase__ :List[Any] = val[-dim:]
else:
UpperCamelCase__ :str = val
return orig_state_dict
def A ( lowercase__ : Union[str, Any] , lowercase__ : List[str] ) -> Union[str, Any]:
UpperCamelCase__ :Dict = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase__ :Dict = 1024
UpperCamelCase__ :int = 4096
UpperCamelCase__ :Optional[Any] = 24
UpperCamelCase__ :List[str] = 16
elif "huge" in checkpoint_url:
UpperCamelCase__ :Union[str, Any] = 14
UpperCamelCase__ :Optional[Any] = 1280
UpperCamelCase__ :Tuple = 5120
UpperCamelCase__ :Any = 32
UpperCamelCase__ :str = 16
UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ )
UpperCamelCase__ :List[str] = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" )["""model"""]
UpperCamelCase__ :Union[str, Any] = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase__ :Any = convert_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
UpperCamelCase__ :Optional[Any] = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
UpperCamelCase__ :Dict = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
UpperCamelCase__ :Optional[int] = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase__ :Union[str, Any] = image_processor(images=lowercase__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
UpperCamelCase__ :List[Any] = model(**lowercase__ )
UpperCamelCase__ :Dict = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase__ :Optional[Any] = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase__ :List[str] = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase__ :Dict = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase__ , atol=1E-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 702
|
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable , ya: float , xa: float , step_size: float , x_end: float ) -> np.ndarray:
    """Integrate y' = ode_func(x, y) from x = xa with y(xa) = ya up to x_end using the explicit Euler method."""
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
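# Rough sanity check (illustrative): integrating y' = y from x = 0 with y(0) = 1
# up to x = 1 with step 0.01, explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
# lands a little below e, around 2.70.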
if __name__ == "__main__":
import doctest
doctest.testmod()
| 383
| 0
|
import os
from collections.abc import Iterator
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = "." ) -> Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(_SCREAMING_SNAKE_CASE ):
_A = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_SCREAMING_SNAKE_CASE )[1] in (".py", ".ipynb"):
yield os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).lstrip('./' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return F"{i * ' '}*" if i else "\n##"
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_A = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_SCREAMING_SNAKE_CASE ) or old_parts[i] != new_part) and new_part:
print(F"{md_prefix(_SCREAMING_SNAKE_CASE )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = "." ) -> None:
"""simple docstring"""
_A = ''
for filepath in sorted(good_file_paths(_SCREAMING_SNAKE_CASE ) ):
_A, _A = os.path.split(_SCREAMING_SNAKE_CASE )
if filepath != old_path:
_A = print_path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_A = (filepath.count(os.sep ) + 1) if filepath else 0
_A = F"{filepath}/{filename}".replace(' ' , '%20' )
_A = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F"{md_prefix(_SCREAMING_SNAKE_CASE )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md(".")
| 27
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__a: Dict = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
__a: Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__a: Optional[Any] = dict(zip(vocab, range(len(vocab))))
__a: Union[str, Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
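# A minimal sanity-check sketch (not part of the original script): reload the tiny
# checkpoint that was just saved to the local `mname_tiny` directory and confirm it
# round-trips through save_pretrained/from_pretrained.
reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
reloaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
print("reloaded num of params", reloaded_model.num_parameters())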
| 152
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
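# Hedged note (not part of the original tests): these checks are gated by @is_pt_tf_cross_test
# and mostly by @slow, so they are skipped unless the corresponding environment variables are
# set when running pytest, e.g. (the test file path is an assumption):
#   RUN_PT_TF_CROSS_TESTS=1 RUN_SLOW=1 pytest tests/models/auto/test_modeling_tf_pytorch.py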
| 309
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 309
| 1
|
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
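    # A minimal usage sketch (not part of the original snippet): generate a 12-character
    # password and check it against the strength rules defined above.
    candidate = password_generator(12)
    print(candidate, "is strong" if is_strong_password(candidate) else "is weak")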
| 288
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
| 247
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
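# A minimal usage sketch (not part of the original module): instantiate the config and
# inspect the backbone stage names / selected output features.
if __name__ == "__main__":
    config = BitConfig(layer_type="bottleneck", out_features=["stage3", "stage4"])
    print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(config.out_features)  # ['stage3', 'stage4']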
| 583
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter <= max_perimeter, the integer-sided right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter with the largest number of integer right triangles."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions")
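    # A small sanity check (not in the original snippet): with max_perimeter=12 the only
    # integer-sided right triangle is (3, 4, 5), so exactly one triplet is counted.
    assert pythagorean_triple(12)[12] == 1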
| 583
| 1
|