import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
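
# These BLEU regression checks are gated by the @slow marker, so they only run
# when slow tests are enabled; an illustrative invocation (the test-file path
# is a placeholder, not taken from the original file):
#
#   RUN_SLOW=1 pytest path/to/this_test_file.py -k bleu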
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
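
# An illustrative invocation of the packing CLI above (the script name, paths,
# and tokenizer choice are placeholders, not from the original file):
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./my_seq2seq_data --save_path ./packed
#
# pack_examples() can also be called directly with a tokenizer and two lists
# of source/target strings.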
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    sd = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(sd)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
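
# An illustrative invocation (script name and paths are placeholders, not from
# the original file):
#
#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#       --fairseq_path /path/to/metaseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./opt-converted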
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
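
# A minimal usage sketch (not part of the original module), assuming these
# classes are exposed by transformers alongside OPTConfig:
#
#   vision_config = Blip2VisionConfig()
#   qformer_config = Blip2QFormerConfig()
#   text_config = OPTConfig()
#   config = Blip2Config.from_vision_qformer_text_configs(
#       vision_config, qformer_config, text_config
#   )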
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
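
# A minimal smoke-test sketch (not part of the original module). The config
# mirrors the small DanceDiffusion test setup; extra_in_channels=16 accounts
# for the Fourier timestep channels concatenated by the NoSkip blocks, and the
# sample length is an arbitrary choice divisible by the two downsampling steps:
#
#   model = UNet1DModel(
#       block_out_channels=(32, 32, 64), extra_in_channels=16,
#       in_channels=2, out_channels=2,
#   )
#   out = model(torch.randn(1, 2, 2048), timestep=10).sample  # same shape as the input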
import os


def solution():
    """Return the greatest product of four adjacent numbers (right, down, or
    diagonally) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
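
# A minimal usage sketch (not part of the original module); the checkpoint name
# is one of the hosted ELECTRA models listed in the maps above:
#
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   ids = tokenizer("Hello world").input_ids  # [CLS] ... [SEP], with type ids as above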
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
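
# A minimal usage sketch (not part of the original module), assuming `pt_model`
# is a PyTorch model whose Flax counterpart `flax_model` implements
# init_weights(); the seed 42 mirrors the default above:
#
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)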
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant, the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
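
if __name__ == "__main__":
    # Illustrative usage (not part of the original module): exercise the
    # directed variant on a tiny three-edge chain.
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 3)
    print(dg.all_nodes())         # [0, 1, 2, 3]
    print(dg.dfs())               # [0, 1, 2, 3]
    print(dg.bfs())               # [0, 1, 2, 3]
    print(dg.in_degree(2))        # 1
    print(dg.topological_sort())  # [3, 2, 1, 0]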
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
import random


def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2**keysize)
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class PolybiusCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter):
        """Return the 1-based (row, column) pair for a letter in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1, index2):
        """Return the letter at the 1-based (row, column) position."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message):
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message):
        message = message.lower()
        message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
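
if __name__ == "__main__":
    # Illustrative round trip (not part of the original module).
    cipher = PolybiusCipher()
    encoded = cipher.encode("test message")
    print(encoded)
    print(cipher.decode(encoded))  # prints "testmessage"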
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
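
# A minimal usage sketch (not part of the original module): the derived
# hidden_size for the default config is embed_dim * 2**(num_stages - 1).
#
#   config = Swinv2Config()
#   assert config.hidden_size == 96 * 2**3  # 768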
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __a ( lowerCAmelCase__ : Dict ):
a__ , a__ : int = image.size
a__ , a__ : List[str] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
a__ : Tuple = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
a__ : List[Any] = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 255.0
a__ : Any = image[None].transpose(0 , 3 , 1 , 2 )
a__ : Dict = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , A__ : VQModel , A__ : UNetaDModel , A__ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=A__ , unet=A__ , scheduler=A__ )
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
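# Minimal usage sketch (illustrative; the checkpoint name is an example, not
# something this file prescribes):
#
#     pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = PIL.Image.open("input.png").convert("RGB").resize((128, 128))
#     upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]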
| 688
| 0
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
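# Example invocation (illustrative; assumes this script is saved under its
# transformers name, and all paths are placeholders):
#
#     python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#         --pytorch_checkpoint_path ./bert.bin --config_file ./config.json \
#         --tf_dump_path ./tf_dump --compare_with_pt_model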
| 380
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args: Namespace):
    """Factory used by the argument parser to build the command from parsed args."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        """Register this command on the datasets-cli argument parser."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
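# Example CLI invocation (illustrative; paths are placeholders):
#
#     datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py \
#         --datasets_directory ./converted_datasets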
| 380
| 1
|
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
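# Minimal usage sketch outside the test harness (illustrative; assumes a torch
# install and network access for the checkpoint):
#
#     classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cats.png", candidate_labels=["cat", "plane", "remote"])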
| 530
|
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """
    Monte Carlo estimate of pi: sample points uniformly in the square
    [-1, 1] x [-1, 1] and count the fraction landing inside the unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The math value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """
    Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: average the function at uniform samples, then
    multiply by the interval width.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """
    Check the estimator against y = x, whose exact integral over
    [min_value, max_value] is (max_value**2 - min_value**2) / 2.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """
    Estimate pi by integrating sqrt(4 - x^2) over [0, 2]: that is the area of
    a quarter circle of radius 2, which equals pi exactly.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
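# Quick manual run (illustrative; results vary because sampling is random):
#
#     pi_estimator(100_000)                        # typically within ~0.01 of pi
#     area_under_line_estimator_check(100_000)
#     pi_estimator_using_area_under_curve(100_000)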
| 530
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
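# Illustrative note (not part of the original file): `_LazyModule` defers the
# heavy imports listed above until an attribute is first accessed. For example:
#
#     from transformers.models.unispeech import UniSpeechConfig  # cheap
#     from transformers.models.unispeech import UniSpeechModel   # pulls in the torch-backed module now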
| 516
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
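# Example invocation (illustrative; assumes the script is saved under its
# transformers name, and all paths are placeholders):
#
#     python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./fairseq_ckpt.pt --pytorch_dump_folder_path ./hf_model \
#         --dict_path ./dict.ltr.txt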
| 516
| 1
|
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones, keeping the same
    API as the other backbone models in the library.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """
        Empty init weights function to ensure compatibility of the class in the library.
        """
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
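# Minimal usage sketch (illustrative; "resnet18" is an arbitrary timm model,
# and this assumes `timm` plus torch are installed):
#
#     import torch
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps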
| 357
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift=1,
    min_timescale=1,
    max_timescale=1.0e4,
    flip_sin_to_cos=False,
    scale=1.0,
):
    """Sinusoidal timestep embeddings: returns a [N, embedding_dim] signal."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    # Learned two-layer MLP applied on top of the sinusoidal embedding.
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    # Thin wrapper that applies `get_sinusoidal_embeddings` with fixed settings.
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
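def _sinusoidal_demo():
    # Illustrative check (not part of the original module): 4 timesteps
    # embedded into 32 dimensions give a (4, 32) array whose columns mix
    # sin/cos at geometrically spaced frequencies.
    emb = get_sinusoidal_embeddings(jnp.arange(4, dtype=jnp.float32), embedding_dim=32)
    assert emb.shape == (4, 32)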
| 357
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 183
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}
CONTROL_CODES = {
'''Pregnancy''': 16_8629,
'''Christianity''': 7675,
'''Explain''': 10_6423,
'''Fitness''': 6_3440,
'''Saving''': 6_3163,
'''Ask''': 2_7171,
'''Ass''': 9_5985,
'''Joke''': 16_3509,
'''Questions''': 4_5622,
'''Thoughts''': 4_9605,
'''Retail''': 5_2342,
'''Feminism''': 16_4338,
'''Writing''': 1_1992,
'''Atheism''': 19_2263,
'''Netflix''': 4_8616,
'''Computing''': 3_9639,
'''Opinion''': 4_3213,
'''Alone''': 4_4967,
'''Funny''': 5_8917,
'''Gaming''': 4_0358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 7_7138,
'''Diet''': 3_6206,
'''Legal''': 1_1859,
'''Norman''': 4939,
'''Tip''': 7_2689,
'''Weight''': 5_2343,
'''Movies''': 4_6273,
'''Running''': 2_3425,
'''Science''': 2090,
'''Horror''': 3_7793,
'''Confession''': 6_0572,
'''Finance''': 1_2250,
'''Politics''': 1_6360,
'''Scary''': 19_1985,
'''Support''': 1_2654,
'''Technologies''': 3_2516,
'''Teenage''': 6_6160,
'''Event''': 3_2769,
'''Learned''': 6_7460,
'''Notion''': 18_2770,
'''Wikipedia''': 3_7583,
'''Books''': 6665,
'''Extract''': 7_6050,
'''Confessions''': 10_2701,
'''Conspiracy''': 7_5932,
'''Links''': 6_3674,
'''Narcissus''': 15_0425,
'''Relationship''': 5_4766,
'''Relationships''': 13_4796,
'''Reviews''': 4_1671,
'''News''': 4256,
'''Translation''': 2_6820,
'''multilingual''': 12_8406,
}
def get_pairs(word):
    """
    Return the set of symbol pairs in a word. A word is represented as a tuple
    of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
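def _bpe_pairs_demo():
    # Illustrative only (not part of the original module): `get_pairs` feeds
    # the merge loop in `bpe` with the adjacent symbol pairs of a word.
    assert get_pairs("low") == {("l", "o"), ("o", "w")}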
| 183
| 1
|
'''simple docstring'''
deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
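

# A minimal sketch of how a pinned-versions table like `deps` is typically consumed
# (the `deps_list` helper is written out here for illustration; transformers defines
# a similar helper in its setup.py):
def deps_list(*pkgs):
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]


if __name__ == "__main__":
    print(deps_list("numpy", "packaging", "tqdm"))
    # ['numpy>=1.17', 'packaging>=20.0', 'tqdm>=4.27']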
| 24
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
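
# A minimal toy analogue (not the transformers implementation) of what the lazy
# pattern above achieves: the submodule is imported only on first attribute access,
# so importing the package itself stays cheap even with heavy optional dependencies.
#
#     import importlib
#
#     class LazyModule:
#         def __init__(self, name):
#             self._name, self._module = name, None
#
#         def __getattr__(self, attr):
#             if self._module is None:  # the real import happens on first use
#                 self._module = importlib.import_module(self._name)
#             return getattr(self._module, attr)
#
#     json_lazy = LazyModule("json")   # nothing imported yet
#     json_lazy.dumps({"a": 1})        # "json" is imported here, on first access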
| 587
| 0
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1: every integer is 6k + r
    # with r in 0..5, and 6k, 6k + 2, 6k + 3 and 6k + 4 are divisible by 2 or 3,
    # so only 6k + 1 and 6k + 5 can be prime.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
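
# A quick sanity check (a sketch, not part of the original script): the first four
# two-sided truncatable primes found by the search above.
if __name__ == "__main__":
    print(compute_truncated_primes(4))  # [23, 37, 53, 73]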
| 670
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 670
| 1
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'{solution() = }')
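
# A quick sanity check (a sketch): the generator paired with `takewhile` yields
# exactly the primes below a bound.
if __name__ == "__main__":
    print(list(takewhile(lambda x: x < 20, prime_generator())))
    # [2, 3, 5, 7, 11, 13, 17, 19]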
| 198
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is included in asdict() output even when it still holds its default value
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
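
# A minimal usage sketch of the task template defined above:
if __name__ == "__main__":
    template = LanguageModeling()
    print(template.task)            # language-modeling
    print(template.column_mapping)  # {'text': 'text'}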
| 198
| 1
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 700
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 626
| 0
|
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # Cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # Decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected number of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected number of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
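

# A minimal usage sketch (the ciphertext is an assumption for illustration: the
# sentence below shifted by 7); the chi-squared search should recover shift 7.
if __name__ == "__main__":
    shift, _, decoded = decrypt_caesar_with_chi_squared(
        "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
    )
    print(shift, decoded)  # 7 why is the caesar cipher so popular? it is too easy to crack!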
| 678
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 649
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method; `latents` holds the encoded (pre-quantization) sample."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, commit_loss, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
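

# A minimal round-trip smoke test (the tiny shapes below are assumptions for illustration):
if __name__ == "__main__":
    model = VQModel(block_out_channels=(32,), norm_num_groups=32, num_vq_embeddings=64)
    sample = torch.randn(1, 3, 32, 32)
    reconstruction = model(sample).sample
    print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])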
| 664
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCAmelCase__ ( )->Any:
_lowerCAmelCase = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase = get_sagemaker_input()
else:
_lowerCAmelCase = get_cluster_input()
return config
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str:
if subparsers is not None:
_lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str:
_lowerCAmelCase = get_user_input()
if args.config_file is not None:
_lowerCAmelCase = args.config_file
else:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(_SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(_SCREAMING_SNAKE_CASE )
print(f'''accelerate configuration saved at {config_file}''' )
def UpperCAmelCase__ ( )->List[Any]:
_lowerCAmelCase = config_command_parser()
_lowerCAmelCase = parser.parse_args()
config_command(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
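
# Typical invocations of the command defined above (a shell sketch):
#
#   accelerate config                               # interactive prompts, saved to the default cache path
#   accelerate config --config_file my_config.yaml  # save the answers to an explicit path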
| 664
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
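
# A minimal usage sketch (the class name above is a reconstruction; the checkpoint
# and the `image` variable below are assumptions for illustration):
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # `inputs` then carries both `input_ids` and `pixel_values`.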
| 375
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
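
# A minimal sketch showing the derived defaults implemented above:
if __name__ == "__main__":
    config = RwkvConfig(hidden_size=512)
    print(config.attention_hidden_size)  # 512 (falls back to hidden_size)
    print(config.intermediate_size)      # 2048 (falls back to 4 * hidden_size)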
| 375
| 1
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
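
# A standalone toy of `find_executable_batch_size` (separate from the script above;
# the fake OOM is an assumption for illustration — the decorator retries with a
# halved batch size whenever the wrapped call raises an out-of-memory style error):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def toy_train(batch_size):
#         if batch_size > 16:  # pretend anything above 16 overflows GPU memory
#             raise RuntimeError("CUDA out of memory.")
#         return batch_size
#
#     toy_train()  # returns 16: tries 128 -> 64 -> 32 -> 16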
| 466
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 466
| 1
|
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder"
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
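

# A minimal smoke-test sketch (random pixels and a tiny label count are assumptions
# for illustration; this builds the default-size RegNet, so it is not lightweight):
if __name__ == "__main__":
    config = RegNetConfig(num_labels=3)
    model = TFRegNetForImageClassification(config)
    pixel_values = tf.random.uniform((1, config.num_channels, 224, 224))
    print(model(pixel_values).logits.shape)  # (1, 3)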
| 193
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 461
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance (pass the unknown one
    as 0), solve Z^2 = R^2 + X^2 for the missing quantity."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
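
# A minimal usage sketch; a 3-4-5 right triangle makes the expected outputs exact.
if __name__ == "__main__":
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}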
| 703
|
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'''7B''': 11_008,
'''13B''': 13_824,
'''30B''': 17_920,
'''65B''': 22_016,
'''70B''': 28_672,
}
NUM_SHARDS = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def lowercase (_lowerCAmelCase , _lowerCAmelCase=1 , _lowerCAmelCase=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def lowercase (_lowerCAmelCase ):
with open(_lowerCAmelCase , """r""" ) as f:
return json.load(_lowerCAmelCase )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
with open(_lowerCAmelCase , """w""" ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def write_model (model_path , input_base_path , model_size , safe_serialization=True ):
    os.makedirs(model_path , exist_ok=True )
    tmp_model_path = os.path.join(model_path , """tmp""" )
    os.makedirs(tmp_model_path , exist_ok=True )
    params = read_json(os.path.join(input_base_path , """params.json""" ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["""n_layers"""]
    n_heads = params["""n_heads"""]
    n_heads_per_shard = n_heads // num_shards
    dim = params["""dim"""]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["""n_kv_heads"""]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary
    def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
        return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
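    # Note (added explanation): Meta's checkpoints interleave the two halves of each
    # rotary embedding pair inside wq/wk, while the Transformers rotary implementation
    # expects the halves stored contiguously; the view/transpose/reshape above
    # converts between the two layouts without changing the math.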
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__lowerCAmelCase = torch.load(os.path.join(_lowerCAmelCase , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
__lowerCAmelCase = [
torch.load(os.path.join(_lowerCAmelCase , f"""consolidated.{i:02d}.pth""" ) , map_location="""cpu""" )
for i in range(_lowerCAmelCase )
]
__lowerCAmelCase = 0
__lowerCAmelCase = {"""weight_map""": {}}
    for layer_i in range(n_layers ):
        filename = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
                    loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
                f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
                    loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
                f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
                f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
                f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
                f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
                f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
                f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
                f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
                    f"""layers.{layer_i}.attention_norm.weight"""
                ].clone(),
                f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
                    f"""layers.{layer_i}.ffn_norm.weight"""
                ].clone(),
            }
            state_dict[f"""model.layers.{layer_i}.self_attn.q_proj.weight"""] = permute(
                torch.cat(
                    [
                        loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(n_heads_per_shard , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(dim , dim ) )
            state_dict[f"""model.layers.{layer_i}.self_attn.k_proj.weight"""] = permute(
                torch.cat(
                    [
                        loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
                            num_local_key_value_heads , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(key_value_dim , dim ) , num_key_value_heads , key_value_dim , dim , )
            state_dict[f"""model.layers.{layer_i}.self_attn.v_proj.weight"""] = torch.cat(
                [
                    loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
                        num_local_key_value_heads , dims_per_head , dim )
                    for i in range(num_shards )
                ] , dim=0 , ).reshape(key_value_dim , dim )
            state_dict[f"""model.layers.{layer_i}.self_attn.o_proj.weight"""] = torch.cat(
                [loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(num_shards )] , dim=1 )
            state_dict[f"""model.layers.{layer_i}.mlp.gate_proj.weight"""] = torch.cat(
                [loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(num_shards )] , dim=0 )
            state_dict[f"""model.layers.{layer_i}.mlp.down_proj.weight"""] = torch.cat(
                [loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(num_shards )] , dim=1 )
            state_dict[f"""model.layers.{layer_i}.mlp.up_proj.weight"""] = torch.cat(
                [loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(num_shards )] , dim=0 )
        state_dict[f"""model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"""] = inv_freq
        for k, v in state_dict.items():
            index_dict["""weight_map"""][k] = filename
            param_count += v.numel()
        torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
__lowerCAmelCase = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
__lowerCAmelCase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
__lowerCAmelCase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_lowerCAmelCase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_lowerCAmelCase )] , dim=0 ),
}
for k, v in state_dict.items():
__lowerCAmelCase = filename
param_count += v.numel()
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
# Write configs
__lowerCAmelCase = {"""total_size""": param_count * 2}
write_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , """pytorch_model.bin.index.json""" ) )
__lowerCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
__lowerCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
__lowerCAmelCase = LlamaConfig(
hidden_size=_lowerCAmelCase , intermediate_size=compute_intermediate_size(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=_lowerCAmelCase , )
config.save_pretrained(_lowerCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
__lowerCAmelCase = LlamaForCausalLM.from_pretrained(_lowerCAmelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=_lowerCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_lowerCAmelCase , safe_serialization=_lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
def write_tokenizer (tokenizer_path , input_tokenizer_path ):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main ():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
    parser.add_argument(
        """--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
    parser.add_argument(
        """--output_dir""" , help="""Location to write HF model and tokenizer""" , )
    parser.add_argument("""--safe_serialization""" , type=bool , help="""Whether or not to save using `safetensors`.""" )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir , """tokenizer.model""" )
    write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
main()
| 573
| 0
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = MaMaaaTokenizer
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowercase : List[str] =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
lowercase : Optional[int] =dict(zip(A_ , range(len(A_ ) ) ) )
lowercase : Dict =Path(self.tmpdirname )
save_json(A_ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(A_ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
lowercase : List[Any] =MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Optional[int] , **UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **A_ )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] ='''</s>'''
lowercase : List[Any] =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.get_tokenizer()
lowercase : Optional[int] =list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(A_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int =self.get_tokenizer()
lowercase : List[str] =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [2, 3, 4, 5, 6] , )
lowercase : List[Any] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(A_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
lowercase : Tuple =tokenizer.convert_tokens_to_string(A_ )
self.assertEqual(A_ , '''This is a test''' )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
# fmt: off
lowercase : Union[str, Any] ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = 'facebook/m2m100_418M'
lowerCamelCase_ = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
lowerCamelCase_ = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def lowerCamelCase_ ( cls : List[str] ):
'''simple docstring'''
lowercase : Any =MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
lowercase : List[str] =1
return cls
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Tuple =self.tokenizer.get_vocab()
self.assertEqual(len(A_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , A_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : int ='''en'''
lowercase : str =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.assertIn(A_ , self.tokenizer.all_special_ids )
# fmt: off
lowercase : Optional[int] =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
lowercase : Dict =self.tokenizer.decode(A_ , skip_special_tokens=A_ )
lowercase : List[str] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A_ )
self.assertEqual(A_ , A_ )
self.assertNotIn(self.tokenizer.eos_token , A_ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[str] =tempfile.mkdtemp()
lowercase : Optional[Any] =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(A_ )
lowercase : List[str] =MaMaaaTokenizer.from_pretrained(A_ )
self.assertDictEqual(new_tok.lang_token_to_id , A_ )
@require_torch
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Any ='''en'''
lowercase : List[Any] ='''fr'''
lowercase : Optional[int] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A_ , return_tensors='''pt''' )
lowercase : List[str] =shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
lowercase : Tuple =batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any ='''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
lowercase : Union[str, Any] ='''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : str ='''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowercase : Tuple ='''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(A_ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
| 92
|
def cocktail_shaker_sort (unsorted ):
    '''simple docstring'''
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
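# Quick illustration (hypothetical input): cocktail_shaker_sort([4, 5, 2, 1, 2])
# returns [1, 2, 2, 4, 5]; each pass sweeps right-to-left then left-to-right, and
# the `break` fires as soon as a full double pass performs no swap.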
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 625
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'dinat'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
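# Sanity check (illustrative): with the defaults above the channel width doubles at
# each of the last three stages, so hidden_size = int(64 * 2 ** 3) = 512 and
# stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4'].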
| 700
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : int = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig( PretrainedConfig ):
    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_ctx=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig( OnnxConfigWithPast ):
    def __init__( self , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_layers( self ):
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        return 13
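# Shape note (illustrative, default config): hidden_size maps to n_embd = 4096 and
# num_attention_heads to n_head = 16, so each dummy past key/value tensor above has
# shape (batch, 16, seq_len + 2, 256), one (key, value) pair per n_layer = 28 layers.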
| 288
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : List[Any] ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCamelCase : str = 1
__lowerCamelCase : str = 3
__lowerCamelCase : Dict = (32, 32)
__lowerCamelCase : Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
@property
def _lowercase ( self : Optional[Any] ) -> Any:
torch.manual_seed(0 )
__lowerCamelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def _lowercase ( self : Union[str, Any] ) -> str:
torch.manual_seed(0 )
__lowerCamelCase : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def _lowercase ( self : Optional[Any] ) -> Tuple:
torch.manual_seed(0 )
__lowerCamelCase : List[str] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_a )
@property
def _lowercase ( self : Tuple ) -> Dict:
def extract(*_a : Optional[Any] , **_a : List[Any] ):
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self : Any ) -> List[str]:
__lowerCamelCase : Any = torch.ones([0] )
def _lowercase ( self : List[str] , _a : Optional[Any] ) -> Dict:
self.pixel_values.to(_a )
return self
return Out()
return extract
def _lowercase ( self : Any ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.dummy_cond_unet
__lowerCamelCase : Tuple = PNDMScheduler(skip_prk_steps=_a )
__lowerCamelCase : Dict = self.dummy_vae
__lowerCamelCase : int = self.dummy_text_encoder
__lowerCamelCase : Tuple = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__lowerCamelCase : int = 77
__lowerCamelCase : Optional[int] = self.dummy_image.to(_a )
__lowerCamelCase : Optional[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
__lowerCamelCase : Dict = AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
__lowerCamelCase : Optional[int] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
__lowerCamelCase : List[Any] = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
__lowerCamelCase : List[Any] = 'A painting of a squirrel eating a burger'
__lowerCamelCase : List[str] = torch.Generator(device=_a ).manual_seed(0 )
__lowerCamelCase : List[str] = alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_a , )
__lowerCamelCase : Optional[Any] = output.images
__lowerCamelCase : Dict = torch.Generator(device=_a ).manual_seed(0 )
__lowerCamelCase : Any = alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_a , return_dict=_a , )[0]
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : str = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _lowercase ( self : Optional[Any] ) -> List[str]:
__lowerCamelCase : Tuple = self.dummy_cond_unet
__lowerCamelCase : List[str] = PNDMScheduler(skip_prk_steps=_a )
__lowerCamelCase : List[str] = self.dummy_vae
__lowerCamelCase : Any = self.dummy_text_encoder
__lowerCamelCase : Any = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__lowerCamelCase : int = 77
__lowerCamelCase : Optional[Any] = self.dummy_image.to(_a )
# put models in fp16
__lowerCamelCase : str = unet.half()
__lowerCamelCase : Union[str, Any] = vae.half()
__lowerCamelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
__lowerCamelCase : Optional[int] = AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
__lowerCamelCase : Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
__lowerCamelCase : Optional[int] = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
__lowerCamelCase : Optional[Any] = 'A painting of a squirrel eating a burger'
__lowerCamelCase : Union[str, Any] = torch.manual_seed(0 )
__lowerCamelCase : Dict = alt_pipe(
[prompt] , generator=_a , num_inference_steps=2 , output_type='np' , image=_a , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _lowercase ( self : Optional[Any] ) -> str:
__lowerCamelCase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
__lowerCamelCase : Union[str, Any] = init_image.resize((760, 504) )
__lowerCamelCase : Union[str, Any] = 'BAAI/AltDiffusion'
__lowerCamelCase : int = AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__lowerCamelCase : List[Any] = 'A fantasy landscape, trending on artstation'
__lowerCamelCase : List[str] = torch.manual_seed(0 )
__lowerCamelCase : Any = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type='np' , )
__lowerCamelCase : List[Any] = output.images[0]
__lowerCamelCase : Dict = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
__lowerCamelCase : List[str] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Tuple ) -> Dict:
__lowerCamelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase : Optional[int] = init_image.resize((768, 512) )
__lowerCamelCase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
__lowerCamelCase : Any = 'BAAI/AltDiffusion'
__lowerCamelCase : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__lowerCamelCase : Union[str, Any] = 'A fantasy landscape, trending on artstation'
__lowerCamelCase : Optional[int] = torch.manual_seed(0 )
__lowerCamelCase : Dict = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type='np' , )
__lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 459
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError( RuntimeError ):
    '''simple docstring'''
    pass
def gen (shards : List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main ():
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    parser = ArgumentParser()
    parser.add_argument('--streaming' , type=bool)
    parser.add_argument('--local_rank' , type=int)
    parser.add_argument('--num_workers' , type=int , default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'shards': [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
if __name__ == "__main__":
main()
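# Example (illustrative): full_size = 4 * 3 = 12, so world_size=2 expects 6 examples
# per rank; with world_size=5, 12 // 5 = 2 with remainder 2, so ranks 0-1 expect 3
# examples each and ranks 2-4 expect 2.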
| 198
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class snake_case__ :
'''simple docstring'''
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_12 , a__=16 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , a__=10_00 , ) -> Dict:
'''simple docstring'''
__snake_case :Any = parent
__snake_case :int = batch_size
__snake_case :Dict = seq_length
__snake_case :Dict = is_training
__snake_case :int = use_input_mask
__snake_case :int = use_token_type_ids
__snake_case :List[Any] = use_labels
__snake_case :Dict = vocab_size
__snake_case :int = hidden_size
__snake_case :Dict = num_hidden_layers
__snake_case :List[str] = num_attention_heads
__snake_case :List[Any] = intermediate_size
__snake_case :List[str] = hidden_act
__snake_case :Optional[int] = hidden_dropout_prob
__snake_case :Any = attention_probs_dropout_prob
__snake_case :int = max_position_embeddings
__snake_case :Optional[Any] = type_vocab_size
__snake_case :Any = type_sequence_label_size
__snake_case :Dict = initializer_range
__snake_case :Any = num_labels
__snake_case :List[str] = num_choices
__snake_case :List[Any] = scope
__snake_case :Union[str, Any] = range_bbox
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__snake_case :int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case :int = bbox[i, j, 3]
__snake_case :Dict = bbox[i, j, 1]
__snake_case :Optional[int] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case :List[Any] = bbox[i, j, 2]
__snake_case :List[str] = bbox[i, j, 0]
__snake_case :List[str] = t
__snake_case :Optional[Any] = tf.convert_to_tensor(a__ )
__snake_case :Union[str, Any] = None
if self.use_input_mask:
__snake_case :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case :List[str] = None
if self.use_token_type_ids:
__snake_case :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case :str = None
__snake_case :str = None
__snake_case :List[str] = None
if self.use_labels:
__snake_case :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case :int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case :int = ids_tensor([self.batch_size] , self.num_choices )
__snake_case :Optional[Any] = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> str:
'''simple docstring'''
__snake_case :List[Any] = TFLayoutLMModel(config=a__ )
__snake_case :Dict = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ )
__snake_case :Tuple = model(a__ , a__ , token_type_ids=a__ )
__snake_case :Dict = model(a__ , a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> int:
'''simple docstring'''
__snake_case :int = TFLayoutLMForMaskedLM(config=a__ )
__snake_case :Union[str, Any] = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> List[Any]:
'''simple docstring'''
__snake_case :Dict = self.num_labels
__snake_case :str = TFLayoutLMForSequenceClassification(config=a__ )
__snake_case :int = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> int:
'''simple docstring'''
__snake_case :Optional[Any] = self.num_labels
__snake_case :Union[str, Any] = TFLayoutLMForTokenClassification(config=a__ )
__snake_case :Union[str, Any] = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> str:
'''simple docstring'''
__snake_case :Union[str, Any] = TFLayoutLMForQuestionAnswering(config=a__ )
__snake_case :Any = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_tf
class snake_case__ ( lowercase_ , lowercase_ , unittest.TestCase):
'''simple docstring'''
lowerCamelCase : Tuple = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
lowerCamelCase : Any = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase : Dict = False
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[Any] = 10
def __lowercase ( self ) -> int:
'''simple docstring'''
__snake_case :Tuple = TFLayoutLMModelTester(self )
__snake_case :List[Any] = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __lowercase ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def __lowercase ( self ) -> int:
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case :Dict = TFLayoutLMModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip("""Onnx compliancy broke with TF 2.10""" )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def prepare_layoutlm_batch_inputs( ):
'''simple docstring'''
__snake_case :Optional[Any] = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231
__snake_case :Dict = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__snake_case :Tuple = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
__snake_case :Union[str, Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__snake_case :Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
@slow
def __lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case :List[str] = TFLayoutLMModel.from_pretrained("""microsoft/layoutlm-base-uncased""" )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case :str = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case :str = model(input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
# test the sequence output on [0, :3, :3]
__snake_case :str = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-3 ) )
# test the pooled output on [1, :3]
__snake_case :Optional[Any] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , a__ , atol=1e-3 ) )
@slow
def __lowercase ( self ) -> int:
'''simple docstring'''
__snake_case :List[Any] = TFLayoutLMForSequenceClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=2 )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case :List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case :int = model(
input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__snake_case :Optional[int] = outputs.loss
__snake_case :List[Any] = (2,)
self.assertEqual(loss.shape , a__ )
# test the shape of the logits
__snake_case :List[str] = outputs.logits
__snake_case :str = (2, 2)
self.assertEqual(logits.shape , a__ )
@slow
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :Dict = TFLayoutLMForTokenClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=13 )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case :Union[str, Any] = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case :Dict = model(
input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
# test the shape of the logits
__snake_case :List[str] = outputs.logits
__snake_case :Optional[Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , a__ )
@slow
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :int = TFLayoutLMForQuestionAnswering.from_pretrained("""microsoft/layoutlm-base-uncased""" )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case :str = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case :List[str] = model(input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
# test the shape of the logits
__snake_case :str = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , a__ )
self.assertEqual(outputs.end_logits.shape , a__ )
| 291
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image : "Image" , question : str ):
        return self.pre_processor(image , question , return_tensors="""pt""" )
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self , outputs ):
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
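# Usage sketch (illustrative):
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(image=pil_image, question="What is on the table?")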
| 291
| 1
|
def equation (x ):
    '''simple docstring'''
    return 10 - x * x
def bisection (a , b ):
    '''simple docstring'''
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("""Wrong space!""" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
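# Both calls below bracket the positive root of 10 - x * x, so each prints a value
# close to sqrt(10) ~= 3.162; the loop stops once the bracket is narrower than 0.01.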
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 529
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 603
| 0
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
UpperCamelCase_ = 42
    def __post_init__( self ):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution( self ):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov( self ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords( self ):
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='''trunc''' ),
            ] , axis=1 , )
        return coords
@property
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.shape
SCREAMING_SNAKE_CASE : List[str] = int(np.prod(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_coords()
SCREAMING_SNAKE_CASE : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE : List[str] = self.get_camera_rays(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = rays.view(UpperCamelCase__ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __A ( self : Dict , UpperCamelCase__ : torch.Tensor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = coords.view(UpperCamelCase__ , -1 , 2 )
SCREAMING_SNAKE_CASE : Any = self.resolution()
SCREAMING_SNAKE_CASE : str = self.fov()
SCREAMING_SNAKE_CASE : str = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE : List[str] = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE : int = fracs.view(UpperCamelCase__ , -1 , 2 )
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.z.view(UpperCamelCase__ , 1 , 3 )
+ self.x.view(UpperCamelCase__ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(UpperCamelCase__ , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE : Tuple = directions / directions.norm(dim=-1 , keepdim=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(UpperCamelCase__ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(UpperCamelCase__ , *UpperCamelCase__ , 2 , 3 )
def __A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCamelCase__ , height=UpperCamelCase__ , x_fov=self.x_fov , y_fov=self.y_fov , )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
SCREAMING_SNAKE_CASE : int = np.array([np.sin(_lowercase ), np.cos(_lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE : Tuple = -z * 4
SCREAMING_SNAKE_CASE : Optional[int] = np.array([np.cos(_lowercase ), -np.sin(_lowercase ), 0.0] )
SCREAMING_SNAKE_CASE : Tuple = np.cross(_lowercase , _lowercase )
origins.append(_lowercase )
xs.append(_lowercase )
ys.append(_lowercase )
zs.append(_lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , width=_lowercase , height=_lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(_lowercase )) , )
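

# Usage sketch (editorial addition): build the 20-camera pan rig at a
# hypothetical 64x64 resolution and inspect the ray bundle; each ray stores an
# origin and a unit direction, hence the trailing (2, 3) dimensions.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays
    print(rays.shape)  # torch.Size([1, 20 * 64 * 64, 2, 3])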
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the input text into two dicts of counts: frequencies of single
    characters and frequencies of two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
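

# Usage sketch (hypothetical input text, editorial addition): prints the
# first-order entropy, the second-order entropy, and their difference, each
# rounded to one decimal place.
if __name__ == "__main__":
    calculate_prob("the quick brown fox jumps over the lazy dog")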
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
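

# Usage sketch (editorial addition; requires scikit-learn and scipy, and the
# import path is an assumption based on where this module appears to live):
#
#     import numpy as np
#     from transformers.data.metrics import glue_compute_metrics
#
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("mrpc", preds, labels)  # {"acc": ..., "f1": ..., "acc_and_f1": ...}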
import os
from datetime import datetime as dt
from github import Github
__lowerCamelCase : Optional[int] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool; used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
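

# Usage sketch (editorial addition; requires sentencepiece and jieba plus
# network access to the TsinghuaAI/CPM-Generate checkpoint; import path assumed):
#
#     from transformers import CpmTokenizer
#
#     tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tokenizer.encode("Hello world")
#     text = tokenizer.decode(ids)  # _decode restores spaces/newlines from \u2582/\u2583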
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCAmelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def snake_case_ ( A_ : list[float] ):
'''simple docstring'''
_lowerCamelCase : Tuple = []
_lowerCamelCase : List[str] = len(A_ )
for i in range(A_ ):
_lowerCamelCase : float = -1
for j in range(i + 1, A_ ):
if arr[i] < arr[j]:
_lowerCamelCase : int = arr[j]
break
result.append(A_ )
return result
def snake_case_ ( A_ : list[float] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = []
for i, outer in enumerate(A_ ):
_lowerCamelCase : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_lowerCamelCase : List[str] = inner
break
result.append(A_ )
return result
def snake_case_ ( A_ : list[float] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = len(A_ )
_lowerCamelCase : list[float] = []
_lowerCamelCase : list[float] = [-1] * arr_size
for index in reversed(range(A_ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_lowerCamelCase : Any = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase__ = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Tuple ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = original_name.split(""".""" )[0]
_lowerCAmelCase = key.split(""".""" )
_lowerCAmelCase = int(key_list[key_list.index(snake_case_ ) - 2] )
_lowerCAmelCase = int(key_list[key_list.index(snake_case_ ) - 1] )
_lowerCAmelCase = orig_block_num - offset
_lowerCAmelCase = key.replace(F"""{orig_block_num}.{layer_num}.{original_name}""" , F"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = OrderedDict()
_lowerCAmelCase , _lowerCAmelCase = 0, 0
for key, value in state_dict.items():
if key.startswith("""network""" ):
_lowerCAmelCase = key.replace("""network""" , """poolformer.encoder""" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""" ) and "patch_embed" not in key:
patch_emb_offset += 1
_lowerCAmelCase = key[: key.find("""proj""" )]
_lowerCAmelCase = key.replace(snake_case_ , F"""patch_embeddings.{total_embed_found}.""" )
_lowerCAmelCase = key.replace("""proj""" , """projection""" )
if key.endswith("""bias""" ):
total_embed_found += 1
if "patch_embeddings" in key:
_lowerCAmelCase = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
_lowerCAmelCase = replace_key_with_offset(snake_case_ , snake_case_ , """mlp.fc1""" , """output.conv1""" )
if "mlp.fc2" in key:
_lowerCAmelCase = replace_key_with_offset(snake_case_ , snake_case_ , """mlp.fc2""" , """output.conv2""" )
if "norm1" in key:
_lowerCAmelCase = replace_key_with_offset(snake_case_ , snake_case_ , """norm1""" , """before_norm""" )
if "norm2" in key:
_lowerCAmelCase = replace_key_with_offset(snake_case_ , snake_case_ , """norm2""" , """after_norm""" )
if "layer_scale_1" in key:
_lowerCAmelCase = replace_key_with_offset(snake_case_ , snake_case_ , """layer_scale_1""" , """layer_scale_1""" )
if "layer_scale_2" in key:
_lowerCAmelCase = replace_key_with_offset(snake_case_ , snake_case_ , """layer_scale_2""" , """layer_scale_2""" )
if "head" in key:
_lowerCAmelCase = key.replace("""head""" , """classifier""" )
_lowerCAmelCase = value
return new_state_dict
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return image
@torch.no_grad()
def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : int , snake_case_ : Optional[int] ) -> str:
"""simple docstring"""
_lowerCAmelCase = PoolFormerConfig()
# set attributes based on model_name
_lowerCAmelCase = """huggingface/label-files"""
_lowerCAmelCase = model_name[-3:]
_lowerCAmelCase = 1000
_lowerCAmelCase = """imagenet-1k-id2label.json"""
_lowerCAmelCase = (1, 1000)
# set config attributes
_lowerCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_lowerCAmelCase = [2, 2, 6, 2]
_lowerCAmelCase = [64, 128, 320, 512]
_lowerCAmelCase = 4.0
_lowerCAmelCase = 0.9
elif size == "s24":
_lowerCAmelCase = [4, 4, 12, 4]
_lowerCAmelCase = [64, 128, 320, 512]
_lowerCAmelCase = 4.0
_lowerCAmelCase = 0.9
elif size == "s36":
_lowerCAmelCase = [6, 6, 18, 6]
_lowerCAmelCase = [64, 128, 320, 512]
_lowerCAmelCase = 4.0
_lowerCAmelCase = 1e-6
_lowerCAmelCase = 0.9
elif size == "m36":
_lowerCAmelCase = [6, 6, 18, 6]
_lowerCAmelCase = [96, 192, 384, 768]
_lowerCAmelCase = 4.0
_lowerCAmelCase = 1e-6
_lowerCAmelCase = 0.9_5
elif size == "m48":
_lowerCAmelCase = [8, 8, 24, 8]
_lowerCAmelCase = [96, 192, 384, 768]
_lowerCAmelCase = 4.0
_lowerCAmelCase = 1e-6
_lowerCAmelCase = 0.9_5
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor
_lowerCAmelCase = PoolFormerImageProcessor(crop_pct=snake_case_ )
# Prepare image
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=snake_case_ , return_tensors="""pt""" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
_lowerCAmelCase = torch.load(snake_case_ , map_location=torch.device("""cpu""" ) )
# rename keys
_lowerCAmelCase = rename_keys(snake_case_ )
# create HuggingFace model and load state dict
_lowerCAmelCase = PoolFormerForImageClassification(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
# Define image processor
_lowerCAmelCase = PoolFormerImageProcessor(crop_pct=snake_case_ )
_lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values
# forward pass
_lowerCAmelCase = model(snake_case_ )
_lowerCAmelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_lowerCAmelCase = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
elif size == "s24":
_lowerCAmelCase = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
elif size == "s36":
_lowerCAmelCase = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
elif size == "m36":
_lowerCAmelCase = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
elif size == "m48":
_lowerCAmelCase = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
else:
raise ValueError(F"""Size {size} not supported""" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , snake_case_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase ):
if not all(char in '''01''' for char in bin_string ):
raise ValueError('''Non-binary value was passed to the function''' )
if not bin_string:
raise ValueError('''Empty string was passed to the function''' )
__lowercase : List[str] = ''''''
while len(__UpperCamelCase ) % 3 != 0:
__lowercase : Any = '''0''' + bin_string
__lowercase : str = [
bin_string[index : index + 3]
for index in range(len(__UpperCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__lowercase : Optional[Any] = 0
for index, val in enumerate(__UpperCamelCase ):
oct_val += int(2 ** (2 - index) * int(__UpperCamelCase ) )
oct_string += str(__UpperCamelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
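

# Worked examples (editorial addition): 0b1111 == 15 == 0o17 and
# 0b101010101 == 341 == 0o525.
if __name__ == "__main__":
    print(bin_to_octal("1111"))  # 17
    print(bin_to_octal("101010101"))  # 525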
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Union[str, Any] = 10
def _lowerCamelCase ( self ) -> str:
__lowercase : List[str] = [1, 2, 3, 4]
__lowercase : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__lowercase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> int:
__lowercase : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__lowercase : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : List[Any] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
__lowercase ,__lowercase : Optional[Any] = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = ''''''
__lowercase ,__lowercase : Any = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[str] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
__lowercase ,__lowercase : int = process_story(UpperCamelCase_ )
__lowercase : Union[str, Any] = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : List[str] = ['''It was the best of times.''']
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Union[str, Any] = torch.tensor([1, 2, 3, 4] )
__lowercase : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Optional[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowercase : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowercase : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[Any] = 1_01
__lowercase : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
__lowercase : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowercase : Optional[int] = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
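

# Usage sketch (editorial addition; the public import path is an assumption):
#
#     from transformers import CamembertConfig
#
#     config = CamembertConfig(num_hidden_layers=6)  # override any default above
#     assert config.model_type == "camembert"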
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    # An inverse exists only when a and m are coprime.
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclidean algorithm.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
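

# Worked example (editorial addition): 7 * 15 = 105 = 4 * 26 + 1, so 15 is the
# inverse of 7 modulo 26, while gcd(4, 26) = 2 means 4 has no inverse mod 26.
if __name__ == "__main__":
    print(find_mod_inverse(7, 26))  # 15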
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """
    >>> mincut(test_graph, source=0, sink=5)
    [(1, 3), (4, 3), (4, 5)]
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    # Saturated residual edges that carried flow form the minimum cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """
    Post renaming of basic JAX keys to pytorch.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx + 1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    """Simple check that a converted checkpoint can be loaded and generate."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Any = "gpt_neo"
lowercase : Optional[int] = ["past_key_values"]
lowercase : str = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , _A=5_0_2_5_7 , _A=2_0_4_8 , _A=2_0_4_8 , _A=2_4 , _A=[[["global", "local"], 1_2]] , _A=1_6 , _A=None , _A=2_5_6 , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=0.1 , _A=1E-5 , _A=0.02 , _A=True , _A=5_0_2_5_6 , _A=5_0_2_5_6 , **_A , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_layers
_SCREAMING_SNAKE_CASE =num_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =window_size
_SCREAMING_SNAKE_CASE =activation_function
_SCREAMING_SNAKE_CASE =resid_dropout
_SCREAMING_SNAKE_CASE =embed_dropout
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =classifier_dropout
_SCREAMING_SNAKE_CASE =layer_norm_epsilon
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =use_cache
_SCREAMING_SNAKE_CASE =bos_token_id
_SCREAMING_SNAKE_CASE =eos_token_id
_SCREAMING_SNAKE_CASE =attention_types
_SCREAMING_SNAKE_CASE =self.expand_attention_types_params(_A )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
@staticmethod
def UpperCamelCase_ ( _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[]
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _lowerCAmelCase(a : int , a : Tuple , a : Union[str, Any] , a : Optional[Any] ) -> str:
import torch
_SCREAMING_SNAKE_CASE =input.size()
_SCREAMING_SNAKE_CASE =len(a )
_SCREAMING_SNAKE_CASE =shape[dimension]
_SCREAMING_SNAKE_CASE =torch.arange(0 , a , a )
_SCREAMING_SNAKE_CASE =torch.div(sizedim - size , a , rounding_mode='''floor''' ) + 1
_SCREAMING_SNAKE_CASE =torch.arange(a ) + low_indices[:min_length][:, None]
_SCREAMING_SNAKE_CASE =[slice(a )] * rank
_SCREAMING_SNAKE_CASE =indices
_SCREAMING_SNAKE_CASE =input[s]
_SCREAMING_SNAKE_CASE =list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(a )
def _lowerCAmelCase(a : Optional[Any] , a : Optional[int] ) -> List[str]:
import torch
_SCREAMING_SNAKE_CASE =torch.arange(1 , a )
_SCREAMING_SNAKE_CASE =torch.remainder(a , a )
_SCREAMING_SNAKE_CASE =remainders == 0
_SCREAMING_SNAKE_CASE =candidates[divisor_indices]
_SCREAMING_SNAKE_CASE =torch.max(a )
return largest_divisor, torch.div(a , a , rounding_mode='''floor''' )
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='''inputs''' )
_SCREAMING_SNAKE_CASE ={0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_SCREAMING_SNAKE_CASE ={0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self._config.num_heads
def UpperCamelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
# We need to order the input in the way they appears in the forward()
_SCREAMING_SNAKE_CASE =OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_SCREAMING_SNAKE_CASE =seqlen + 2
_SCREAMING_SNAKE_CASE =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_SCREAMING_SNAKE_CASE =[
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
_SCREAMING_SNAKE_CASE =common_inputs['''attention_mask''']
if self.use_past:
_SCREAMING_SNAKE_CASE =ordered_inputs['''attention_mask'''].dtype
_SCREAMING_SNAKE_CASE =torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 1_3
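

# Worked example (editorial addition): expand_attention_types_params turns the
# compact attention_types spec into one entry per layer, so
# [[["global", "local"], 2]] expands to ["global", "local", "global", "local"],
# and the default [[["global", "local"], 12]] yields the 24 entries required by
# num_layers=24.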
"""simple docstring"""
from __future__ import annotations
from math import gcd
def __A ( a_ : int , a_ : int = 2 , a_ : int = 1 , a_ : int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('''The input value cannot be less than 2''' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(a_ : int , a_ : int , a_ : int ) -> int:
return (pow(a_ , 2 ) + step) % modulus
for _ in range(a_ ):
# These track the position within the cycle detection logic.
SCREAMING_SNAKE_CASE : str = seed
SCREAMING_SNAKE_CASE : List[Any] = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
SCREAMING_SNAKE_CASE : Any = rand_fn(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Dict = rand_fn(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Optional[int] = rand_fn(a_ , a_ , a_ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
SCREAMING_SNAKE_CASE : str = gcd(hare - tortoise , a_ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
SCREAMING_SNAKE_CASE : List[Any] = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
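

# Worked example (editorial addition): with the defaults seed=2 and step=1,
# f(x) = x**2 + 1 (mod 255) gives tortoise=5 and hare=26 on the first pass, and
# gcd(26 - 5, 255) = 3, so pollard_rho(255) returns the nontrivial factor 3
# (255 = 3 * 5 * 17).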
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = filter(lambda _UpperCamelCase : p.requires_grad , model.parameters() )
lowercase_ : Union[str, Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCamelCase__ = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """simple docstring"""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this"
            " function.")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """simple docstring"""
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class _UpperCAmelCase ( pl.Callback ):
    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        '''simple docstring'''
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        '''simple docstring'''
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
    def on_test_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
@rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 620
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"""jukebox""": 5_1_2,
}
class _lowercase ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, artists_file, genres_file, lyrics_file, version=["v3", "v2", "v2"], max_n_lyric_tokens=512, n_genres=5, unk_token="<|endoftext|>", **kwargs, ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs, )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(R"\-'", R"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # best-guess reconstruction: merge the three encoders into one mapping
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text):
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(R"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.")
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length.")
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"])
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"])
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"])
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
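# Hypothetical end-to-end sketch (artist/genre/lyrics strings are illustrative
# and the three vocab files must exist on disk):
#
#     tok = _lowercase("artists.json", "genres.json", "lyrics.json")
#     batch = tok("Alan Jackson", "Country Rock", lyrics="old town road")
#     print(batch["input_ids"][0].shape)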
| 613
| 0
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
    def test_added_tokens_do_lower_case(self):
        """simple docstring"""
        pass
    def test_add_special_tokens(self):
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        """simple docstring"""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def test_maximum_encoding_length_pair_input(self):
        """simple docstring"""
        pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass
| 720
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlockaD(nn.Module):
    """simple docstring"""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        """simple docstring"""
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        """simple docstring"""
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
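# Shape sketch (illustrative): in channels-last layout a cross-attention down
# block maps (batch, h, w, in_channels) -> (batch, h // 2, w // 2, out_channels)
# when add_downsample is True, and also returns every intermediate hidden state
# so the up blocks can consume them as UNet skip connections.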
class FlaxDownBlockaD(nn.Module):
    """simple docstring"""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        """simple docstring"""
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, temb, deterministic=True):
        """simple docstring"""
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlockaD(nn.Module):
    """simple docstring"""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        """simple docstring"""
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        """simple docstring"""
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class FlaxUpBlockaD(nn.Module):
    """simple docstring"""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        """simple docstring"""
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        """simple docstring"""
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module):
    """simple docstring"""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        """simple docstring"""
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        """simple docstring"""
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
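# Minimal init sketch (channel sizes, array shapes and RNG handling are
# illustrative; FlaxTransformeraDModel / FlaxResnetBlockaD come from the sibling
# modules imported above):
#
#     import jax
#     block = FlaxUNetMidBlockaDCrossAttn(in_channels=32, num_attention_heads=4)
#     hidden = jnp.zeros((1, 8, 8, 32))
#     temb = jnp.zeros((1, 128))
#     context = jnp.zeros((1, 77, 32))
#     params = block.init(jax.random.PRNGKey(0), hidden, temb, context)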
| 446
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'solver_type': 'bh2',
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
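    # full_loop above runs a short 10-step denoising loop with the dummy model
    # and returns the final sample; the tests below only compare the mean of its
    # absolute values against small reference constants.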
    def test_step_shape(self):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        """simple docstring"""
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        """simple docstring"""
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    sample = self.full_loop(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        """simple docstring"""
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        """simple docstring"""
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 174
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """gpt_neo"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__(self, vocab_size=50_257, max_position_embeddings=2_048, hidden_size=2_048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.')
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        """simple docstring"""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
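# Worked example (illustrative): for seq_length = 10 and window_size = 4 the
# candidate divisors are tensor([1, 2, 3]); 10 is divisible by 1 and 2, so the
# largest divisor is 2 and the sequence splits into 10 // 2 = 5 blocks.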
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self):
        """simple docstring"""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_attention_heads(self):
        """simple docstring"""
        return self._config.num_heads
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ):
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self):
        """simple docstring"""
        return 13
| 174
| 1
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict) -> None:
    '''simple docstring'''
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb) -> nn.Linear:
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None) -> dict:
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0', F'ffn.experts.expert_{expert_idx}')
            else:
                key = key.replace('moe_layer.experts.', 'ffn.experts.expert_')
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg', '.ffn.router.classifier')
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.', '.ffn.fc2.')
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.', '.ffn.fc1.')
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.', '.cross_attention.')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm', 'cross_attention_layer_norm')
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm', 'ff_layer_norm')
        new_dict[key] = state_dict[old_key]
    return new_dict
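# Example mapping (illustrative key): with expert_idx=3,
#     "decoder.layers.0.moe_layer.experts.0.fc1.weight"
# is renamed to
#     "decoder.layers.0.ffn.experts.expert_3.fc1.weight"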
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    '''simple docstring'''
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + F'-rank-{expert}.pt'
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)['model']
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', F'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', F'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt')['model']
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace('.bin', F'-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin')
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', F'-{idx+1:05d}-of-???.bin'))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("""Done""")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 400
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[float], iterations: int, ) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)
    if cols2 != 1:
        msg = F'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            F'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            F'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')
    table = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')
    return is_diagonally_dominant
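# Worked sketch (illustrative 2x2 system): for A = [[4, 1], [1, 3]],
# b = [[1], [2]] and initial values [0, 0], one Jacobi sweep yields
# x1 = (1 - 0) / 4 = 0.25 and x2 = (2 - 0) / 3 = 0.666..., which converges
# toward the exact solution over further iterations.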
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 400
| 1
|
def optimal_merge_pattern(files: list) -> float:
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
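# Worked example (illustrative): for files = [2, 3, 4] the two cheapest files
# are merged first (2 + 3 = 5, cost 5), then 5 + 4 = 9 (cost 9), so the optimal
# total merge cost is 14.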
if __name__ == "__main__":
import doctest
doctest.testmod()
| 100
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        '''simple docstring'''
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))

    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
# Initialize image_processor
_UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_proc_tester ,equal_resolution=lowerCamelCase__ ,numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,np.ndarray )
# Test not batched input
_UpperCamelCase : Union[str, Any] = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
_UpperCamelCase : Optional[Any] = image_processor(lowerCamelCase__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
# Initialize image_processor
_UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase : Any = prepare_image_inputs(self.image_proc_tester ,equal_resolution=lowerCamelCase__ ,torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,torch.Tensor )
# Test not batched input
_UpperCamelCase : Dict = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
_UpperCamelCase : Any = image_processor(lowerCamelCase__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
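# Standalone usage sketch of the processor under test (illustrative, not part
# of the unittest suite; mirrors the 18x18 default `size` used by the tester above):
#
#     processor = ViTImageProcessor(size={"height": 18, "width": 18})
#     pixel_values = processor(Image.new("RGB", (64, 48)), return_tensors="pt").pixel_values
#     assert pixel_values.shape == (1, 3, 18, 18)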
| 195
| 0
|
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        # switch the pivot with the left-most bound
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = partition(a, left, right)
        # recursive quicksort to the left of the pivot point
        quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        quick_sort_random(a, pivot_index + 1, right)


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
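# Deterministic sanity check (a sketch): the random pivots change the recursion
# order but never the sorted result, so this always holds:
#
#     data = [5, 1, 4, 2, 8, 0]
#     quick_sort_random(data, 0, len(data))
#     assert data == [0, 1, 2, 4, 5, 8]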
| 716
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
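# Example invocation (illustrative), run from the repo root:
#
#     python ./utils/get_modified_files.py utils src tests examples
#
# prints the modified .py files under those directories as a single
# space-separated line with no trailing newline, ready for a Makefile command.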
| 243
| 0
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ) -> None:
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
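# Worked example (a sketch based on the defaults above): upsampling_ratios
# [8, 5, 4, 2] give a hop length of 8 * 5 * 4 * 2 = 320 samples, so
# frame_rate = math.ceil(24_000 / 320) = 75 and
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32 for the 24 kHz model.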
| 19
|
import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes in a test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in which `model_class` appears in `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes of the test classes in which `model_class` appears."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_tester_mapping(test_file):
    """Get a mapping from test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_test_mapping(test_file):
    """Get a mapping from model classes to the test classes that exercise them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to their model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
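# Usage sketch (the test file path is illustrative and must exist in a
# transformers checkout, run from the repo root):
#
#     mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#     print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}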
| 559
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image processor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
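# Worked example of the aspect-preserving resize checked above (a sketch): the
# 640x480 COCO fixture has its shorter side scaled to shortest_edge=800, so the
# longer side becomes int(640 * 800 / 480) = 1066, matching the asserted
# pixel_values shape of (1, 3, 800, 1066).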
| 700
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
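# The resolution order exercised above, summarized as a sketch:
#
#     FeaturesManager.determine_framework(model_id, "tf")   # explicit framework wins
#     FeaturesManager.determine_framework(local_ckpt_dir)   # saved checkpoint format decides
#     FeaturesManager.determine_framework(hub_model_id)     # environment decides, preferring PyTorch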
| 484
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class snake_case_ :
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : Optional[str] = None ) ->Tuple:
snake_case_ = (
os.path.join(_UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case_ = Extractor
def snake_case__( self : Any , _UpperCamelCase : str ) ->str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
snake_case_ = os.path.abspath(_UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCamelCase ) )
def snake_case__( self : int , _UpperCamelCase : str , _UpperCamelCase : bool ) ->bool:
return force_extract or (
not os.path.isfile(_UpperCamelCase ) and not (os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ))
)
def snake_case__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : bool = False ) ->str:
snake_case_ = self.extractor.infer_extractor_format(_UpperCamelCase )
if not extractor_format:
return input_path
snake_case_ = self._get_output_path(_UpperCamelCase )
if self._do_extract(_UpperCamelCase , _UpperCamelCase ):
self.extractor.extract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return output_path
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
@abstractmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : str ) ->bool:
...
@staticmethod
@abstractmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
...
class snake_case_ ( __A , __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[bytes] = []
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->List[Any]:
with open(_UpperCamelCase , '''rb''' ) as f:
return f.read(_UpperCamelCase )
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if not magic_number:
snake_case_ = max(len(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
snake_case_ = cls.read_magic_number(_UpperCamelCase , _UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
def snake_case__( cls : Union[str, Any] , _UpperCamelCase : Union[Path, str] , **_UpperCamelCase : Any ) ->bool:
return tarfile.is_tarfile(_UpperCamelCase )
@staticmethod
def snake_case__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) ->List[str]:
def resolved(_UpperCamelCase : str ) -> str:
return os.path.realpath(os.path.abspath(_UpperCamelCase ) )
def badpath(_UpperCamelCase : str , _UpperCamelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_UpperCamelCase , _UpperCamelCase ) ).startswith(_UpperCamelCase )
def badlink(_UpperCamelCase : Tuple , _UpperCamelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case_ = resolved(os.path.join(_UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_UpperCamelCase )
snake_case_ = resolved(_UpperCamelCase )
for finfo in members:
if badpath(finfo.name , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase , members=TarExtractor.safemembers(_UpperCamelCase , _UpperCamelCase ) )
tar_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [b"\x1F\x8B"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with gzip.open(_UpperCamelCase , '''rb''' ) as gzip_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def snake_case__( cls : List[str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bytes = b"" ) ->bool:
if super().is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_UpperCamelCase , '''rb''' ) as fp:
snake_case_ = _EndRecData(_UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case_ = fp.read(_UpperCamelCase ) # CD is where we expect it to be
if len(_UpperCamelCase ) == sizeCentralDir:
snake_case_ = struct.unpack(_UpperCamelCase , _UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with zipfile.ZipFile(_UpperCamelCase , '''r''' ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
with lzma.open(_UpperCamelCase ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ = rarfile.RarFile(_UpperCamelCase )
rf.extractall(_UpperCamelCase )
rf.close()
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
snake_case_ = zstd.ZstdDecompressor()
with open(_UpperCamelCase , '''rb''' ) as ifh, open(_UpperCamelCase , '''wb''' ) as ofh:
dctx.copy_stream(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [b"\x42\x5A\x68"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
        with bz2.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
        import py7zr

        os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
        with py7zr.SevenZipFile(_UpperCamelCase , '''r''' ) as archive:
archive.extractall(_UpperCamelCase )
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [b"\x04\x22\x4D\x18"]
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] ) ->None:
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
        import lz4.frame

        with lz4.frame.open(_UpperCamelCase , '''rb''' ) as compressed_file:
with open(_UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def snake_case__( cls : List[Any] ) ->List[str]:
return max(
len(_UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(_UpperCamelCase , _UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def snake_case__( _UpperCamelCase : Union[Path, str] , _UpperCamelCase : int ) ->Tuple:
try:
return MagicNumberBaseExtractor.read_magic_number(_UpperCamelCase , magic_number_length=_UpperCamelCase )
except OSError:
return b""
@classmethod
def snake_case__( cls : Optional[Any] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : bool = False ) ->bool:
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = cls.infer_extractor_format(_UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def snake_case__( cls : int , _UpperCamelCase : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
snake_case_ = cls._get_magic_number_max_length()
snake_case_ = cls._read_magic_number(_UpperCamelCase , _UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return extractor_format
@classmethod
def snake_case__( cls : Optional[int] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Union[Path, str] , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[BaseExtractor] = "deprecated" , ) ->None:
os.makedirs(os.path.dirname(_UpperCamelCase ) , exist_ok=_UpperCamelCase )
# Prevent parallel extractions
snake_case_ = str(Path(_UpperCamelCase ).with_suffix('''.lock''' ) )
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_UpperCamelCase , _UpperCamelCase ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=_UpperCamelCase , )
snake_case_ = extractor if extractor != '''deprecated''' else extractor_format
else:
snake_case_ = cls.extractors[extractor_format]
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=_UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_UpperCamelCase ):
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
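# The extractors above dispatch on the leading "magic number" bytes of a file;
# a minimal sketch of the same check for gzip (0x1F 0x8B, as listed in the gzip
# extractor's magic-number class attribute):
#
#     with open(path, "rb") as f:
#         is_gzip = f.read(2) == b"\x1f\x8b"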
| 39
| 0
|
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'fifty-two card deck is: {combinations(5_2, 5)}\n',
)
print(
'If a class of 40 students must be arranged into groups of',
f'4 for group projects, there are {combinations(4_0, 4)} ways',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'are {combinations(1_0, 3)} ways that first, second and',
'third place can be awarded.',
)
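# Spot check of the formula above: combinations(52, 5) == 2598960, since
# 52! / (5! * 47!) = (52 * 51 * 50 * 49 * 48) / 120 = 311875200 / 120.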
| 340
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
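# Worked example of the size_divisor rounding in `resize` above (a sketch): an
# input of height 522 and width 639 with size_divisor=32 becomes
# (522 // 32) * 32 = 512 by (639 // 32) * 32 = 608, so both output dimensions
# are multiples of 32, as the downstream convolutional strides require.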
| 340
| 1
|
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 512
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """
        Get mel-filter bank features using TorchAudio, which requires 16-bit signed integer inputs.
        """
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
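# A minimal numpy sketch of the utterance-level CMVN performed by
# `utterance_cmvn` above (names local to this sketch):
#
#     valid = features[:input_length]
#     normalized = (valid - valid.mean(axis=0)) / valid.std(axis=0)  # zero mean, unit variance per mel bin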
| 512
| 1
|
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
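# Sketch of the emitted files: for a record with question "q" and positive
# contexts titled "A" and "B", the evaluation_set file gains the line "q" and
# the gold_data_path file gains the tab-separated line "A\tB".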
| 391
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
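# Example invocation of this conversion script (paths are illustrative):
#
#     python <this_script>.py \
#         --tf_checkpoint_path ./lxmert/model.ckpt \
#         --config_file ./lxmert/config.json \
#         --pytorch_dump_path ./lxmert/pytorch_model.bin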
| 391
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 661
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase_( A__, A__, A__ ):
'''simple docstring'''
lowercase__ : List[Any] = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = 5_0_2_5_7 , lowerCamelCase__ = 1_0_2_4 , lowerCamelCase__ = 7_6_8 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = None , lowerCamelCase__ = "gelu_new" , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 1e-5 , lowerCamelCase__ = 0.0_2 , lowerCamelCase__ = True , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = False , ):
super().__init__()
_lowerCamelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
_lowerCamelCase = prefix_inner_dim
_lowerCamelCase = prefix_hidden_dim
_lowerCamelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_lowerCamelCase = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_lowerCamelCase = GPTaConfig(
vocab_size=lowerCamelCase__ , n_positions=lowerCamelCase__ , n_embd=lowerCamelCase__ , n_layer=lowerCamelCase__ , n_head=lowerCamelCase__ , n_inner=lowerCamelCase__ , activation_function=lowerCamelCase__ , resid_pdrop=lowerCamelCase__ , embd_pdrop=lowerCamelCase__ , attn_pdrop=lowerCamelCase__ , layer_norm_epsilon=lowerCamelCase__ , initializer_range=lowerCamelCase__ , scale_attn_weights=lowerCamelCase__ , use_cache=lowerCamelCase__ , scale_attn_by_inverse_layer_idx=lowerCamelCase__ , reorder_and_upcast_attn=lowerCamelCase__ , )
_lowerCamelCase = GPTaLMHeadModel(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
_lowerCamelCase = self.transformer.transformer.wte(lowerCamelCase__ )
_lowerCamelCase = self.encode_prefix(lowerCamelCase__ )
_lowerCamelCase = self.decode_prefix(lowerCamelCase__ )
_lowerCamelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_lowerCamelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_lowerCamelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_lowerCamelCase = self.transformer(inputs_embeds=lowerCamelCase__ , labels=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
return torch.zeros(lowerCamelCase__ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
return self.encode_prefix(lowerCamelCase__ )
@torch.no_grad()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = torch.split(lowerCamelCase__ , 1 , dim=0 )
_lowerCamelCase = []
_lowerCamelCase = []
for feature in features:
_lowerCamelCase = self.decode_prefix(feature.to(lowerCamelCase__ ) ) # back to the clip feature
# Only support beam search for now
_lowerCamelCase , _lowerCamelCase = self.generate_beam(
input_embeds=lowerCamelCase__ , device=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam( self , input_ids=None , input_embeds=None , device=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens , scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
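# Editor's note: a minimal, self-contained sketch of the length-normalised
# beam step used by `generate_beam` above; the helper name `beam_step` and the
# toy tensor shapes are illustrative assumptions, not part of the original model.
import torch

def beam_step(scores, logits, seq_lengths, is_stopped, beam_size):
    # Accumulate per-beam log-probs, then rank every (beam, token) candidate
    # by average log-prob per token so longer hypotheses are not penalised.
    scores_sum = scores[:, None] + logits  # (beam, vocab)
    seq_lengths[~is_stopped] += 1
    averages = scores_sum / seq_lengths[:, None]
    averages, flat_idx = averages.view(-1).topk(beam_size, -1)
    beam_idx = flat_idx // logits.shape[1]   # source beam of each winner
    token_idx = flat_idx % logits.shape[1]   # token that extends that beam
    return averages * seq_lengths[beam_idx], beam_idx, token_idx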
| 661
| 1
|
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=2 , snake_case__=4 , snake_case__="last" , snake_case__=True , snake_case__=None , snake_case__=0 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = parent
_SCREAMING_SNAKE_CASE : Any = batch_size
_SCREAMING_SNAKE_CASE : Any = seq_length
_SCREAMING_SNAKE_CASE : str = is_training
_SCREAMING_SNAKE_CASE : List[Any] = use_input_lengths
_SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
_SCREAMING_SNAKE_CASE : Dict = use_labels
_SCREAMING_SNAKE_CASE : List[str] = gelu_activation
_SCREAMING_SNAKE_CASE : Any = sinusoidal_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = causal
_SCREAMING_SNAKE_CASE : str = asm
_SCREAMING_SNAKE_CASE : Optional[Any] = n_langs
_SCREAMING_SNAKE_CASE : Dict = vocab_size
_SCREAMING_SNAKE_CASE : Tuple = n_special
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : str = num_hidden_layers
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : int = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
_SCREAMING_SNAKE_CASE : Tuple = num_choices
_SCREAMING_SNAKE_CASE : Optional[int] = summary_type
_SCREAMING_SNAKE_CASE : Dict = use_proj
_SCREAMING_SNAKE_CASE : Optional[int] = scope
_SCREAMING_SNAKE_CASE : int = bos_token_id
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_lengths:
_SCREAMING_SNAKE_CASE : Any = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_SCREAMING_SNAKE_CASE : int = None
_SCREAMING_SNAKE_CASE : Dict = None
_SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , 2 ).float()
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = XLMModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : Any = model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case__ , langs=snake_case__ )
_SCREAMING_SNAKE_CASE : List[str] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = XLMWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : Any = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = XLMForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : List[str] = model(snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
_SCREAMING_SNAKE_CASE : Tuple = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = XLMForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : str = model(snake_case__ )
_SCREAMING_SNAKE_CASE : Tuple = model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
_SCREAMING_SNAKE_CASE : List[str] = model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((_SCREAMING_SNAKE_CASE) , ) : Any = result_with_labels.to_tuple()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((_SCREAMING_SNAKE_CASE) , ) : List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = XLMForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : Optional[int] = model(snake_case__ )
_SCREAMING_SNAKE_CASE : Tuple = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
_SCREAMING_SNAKE_CASE : List[Any] = XLMForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : str = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : Optional[int] = XLMForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE : List[Any] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A__ = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__=False ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_SCREAMING_SNAKE_CASE : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_xlm_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs )
    def test_xlm_lm_head( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
    def test_xlm_simple_qa( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
    def test_xlm_qa( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
    def test_xlm_sequence_classif( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
    def test_xlm_token_classif( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
    def test_xlm_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
    def _check_attentions_for_generate(
        self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        """simple docstring"""
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def _check_hidden_states_for_generate(
        self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        """simple docstring"""
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
pass
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class XLMModelLanguageGenerationTest ( unittest.TestCase ):
    @slow
    def test_lm_generate_xlm_mlm_en_2048( self ):
        """simple docstring"""
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device )  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
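# Editor's note: a hedged, commented-out usage sketch for the integration test
# above; the checkpoint id comes from the test itself, while the tokenizer
# class and `max_length=20` are assumptions (the TODO above notes generation
# quality is poor for this checkpoint).
#
#   from transformers import XLMTokenizer
#   tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
#   model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
#   input_ids = tokenizer("the president", return_tensors="pt").input_ids
#   output_ids = model.generate(input_ids, do_sample=False, max_length=20)
#   print(tokenizer.decode(output_ids[0]))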
| 295
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class Node :
    def __init__( self , value = None ):
        """simple docstring"""
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self ):
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class BinarySearchTree :
    def __init__( self , root = None ):
        """simple docstring"""
        self.root = root
    def __str__( self ):
        """simple docstring"""
        return str(self.root )
    def __reassign_nodes( self , node , new_children ):
        """simple docstring"""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node ):
        """simple docstring"""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ):
        """simple docstring"""
        return self.root is None
    def __insert( self , value ):
        """simple docstring"""
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ):
        """simple docstring"""
        for value in values:
            self.__insert(value )
    def search( self , value ):
        """simple docstring"""
        if self.empty():
            raise IndexError("Warning: Tree is empty! Please use another." )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node = None ):
        """simple docstring"""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node = None ):
        """simple docstring"""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value ):
        """simple docstring"""
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node ):
        """simple docstring"""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function=None ):
        """simple docstring"""
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr , node ):
        """simple docstring"""
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k , node ):
        """simple docstring"""
        arr: list[int] = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder( curr_node: Node | None ) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def binary_search_tree_example( ) -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print("The value 6 exists" )
    else:
        print("The value 6 doesn't exist" )
    if t.search(-1 ) is not None:
        print("The value -1 exists" )
    else:
        print("The value -1 doesn't exist" )
    if not t.empty():
        print("Max Value: ", t.get_max().value )  # type: ignore
        print("Min Value: ", t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
        print(t )
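# Editor's note: an equivalent recursive lookup, shown for contrast with the
# iterative `search` method above; it assumes the same `Node` class.
def recursive_search(node: Node | None, value: int) -> Node | None:
    if node is None or node.value == value:
        return node
    if value < node.value:
        return recursive_search(node.left, value)
    return recursive_search(node.right, value)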
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 295
| 1
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __magic_name__ ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self) -> None:
        '''simple docstring'''
        model = torch.nn.Linear(10 , 10)
        optimizer = torch.optim.SGD(model.parameters() , 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
except Exception as e:
self.fail(f"Accelerated optimizer pickling failed with {e}")
AcceleratorState._reset_state()
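# Editor's note: a hedged, commented-out sketch of the scenario the test above
# guards against -- checkpointing a prepared optimizer with plain pickle; the
# file name is a placeholder.
#
#   accelerator = Accelerator()
#   model = torch.nn.Linear(10, 10)
#   optimizer = torch.optim.SGD(model.parameters(), 0.1)
#   model, optimizer = accelerator.prepare(model, optimizer)
#   with open("optimizer.pkl", "wb") as f:
#       pickle.dump(optimizer, f)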
| 446
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
lowercase =[8, 5, 9, 7]
lowercase =[
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowercase =[
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm :
    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table , ) -> None:
        '''simple docstring'''
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self) -> list[int]:
        '''simple docstring'''
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources( self) -> list[int]:
        '''simple docstring'''
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())
    def __need( self) -> list[list[int]]:
        '''simple docstring'''
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager( self) -> dict[int, list[int]]:
        '''simple docstring'''
        return {self.__need().index(i): i for i in self.__need()}
    def main( self , **kwargs) -> None:
        '''simple docstring'''
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break
    def __pretty_data( self) -> None:
        '''simple docstring'''
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
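# Editor's note: a compact NumPy restatement of the safety check performed by
# `main` above, added for illustration; it assumes the same claim-vector /
# allocation / maximum-claim inputs and returns a bool instead of printing.
def is_safe_state(claim_vector, allocated_resources_table, maximum_claim_table) -> bool:
    need = np.array(maximum_claim_table) - np.array(allocated_resources_table)
    available = np.array(claim_vector) - np.array(allocated_resources_table).sum(axis=0)
    finished = [False] * len(allocated_resources_table)
    while not all(finished):
        progressed = False
        for i, done in enumerate(finished):
            if not done and (need[i] <= available).all():
                available = available + np.array(allocated_resources_table[i])
                finished[i] = True
                progressed = True
        if not progressed:
            return False  # no runnable process remains: unsafe
    return True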
if __name__ == "__main__":
import doctest
doctest.testmod()
| 446
| 1
|
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint (ABC ):
    """simple docstring"""
    def __init__( self):
        """simple docstring"""
        self.test()
    def test( self):
        """simple docstring"""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.")
            stepped , completed , reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")
@abstractmethod
def A_ ( self):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
@abstractmethod
def A_ ( self ,lowercase):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
@abstractmethod
def A_ ( self ,lowercase):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
@abstractmethod
def A_ ( self):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
@abstractmethod
def A_ ( self):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
@abstractmethod
def A_ ( self ,lowercase=False):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
class PhrasalConstraint (Constraint ):
    """simple docstring"""
    def __init__( self ,token_ids):
        """simple docstring"""
        super(Constraint ,self).__init__()
        if not isinstance(token_ids ,list) or len(token_ids) == 0:
            raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""")
        if any((not isinstance(token_id ,int) or token_id < 0) for token_id in token_ids):
            raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False
def A_ ( self):
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def A_ ( self ,lowercase):
"""simple docstring"""
if not isinstance(lowercase ,lowercase):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(lowercase)}""")
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def A_ ( self ,lowercase):
"""simple docstring"""
if not isinstance(lowercase ,lowercase):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(lowercase)}""")
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Dict = False
if self.does_advance(lowercase):
self.fulfilled_idx += 1
UpperCAmelCase_ : Union[str, Any] = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Optional[Any] = completed
else:
# failed to make progress.
UpperCAmelCase_ : Optional[Any] = True
self.reset()
return stepped, completed, reset
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Optional[int] = 0
def A_ ( self):
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def A_ ( self ,lowercase=False):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = PhrasalConstraint(self.token_ids)
if stateful:
UpperCAmelCase_ : Tuple = self.seqlen
UpperCAmelCase_ : Any = self.fulfilled_idx
UpperCAmelCase_ : Optional[int] = self.completed
return new_constraint
class DisjunctiveTrie :
    """simple docstring"""
    def __init__( self ,nested_token_ids ,no_subsets=True):
        """simple docstring"""
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root ,nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                F""" {nested_token_ids}.""")
        self.trie = root
    def next_tokens( self ,current_seq):
        """simple docstring"""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens
    def reached_leaf( self ,current_seq):
        """simple docstring"""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0
    def count_leaves( self ,root):
        """simple docstring"""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])
    def has_subsets( self ,trie ,nested_token_ids):
        """simple docstring"""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint (Constraint ):
    """simple docstring"""
    def __init__( self ,nested_token_ids):
        """simple docstring"""
        super(Constraint ,self).__init__()
        if not isinstance(nested_token_ids ,list) or len(nested_token_ids) == 0:
            raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""")
        if any(not isinstance(token_ids ,list) for token_ids in nested_token_ids):
            raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""")
        if any(
            any((not isinstance(token_id ,int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""")
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : int = self.trie.next_tokens(self.current_seq)
if len(lowercase) == 0:
return None
else:
return token_list
def A_ ( self ,lowercase):
"""simple docstring"""
if not isinstance(lowercase ,lowercase):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowercase)}""")
UpperCAmelCase_ : List[str] = self.trie.next_tokens(self.current_seq)
return token_id in next_tokens
def A_ ( self ,lowercase):
"""simple docstring"""
if not isinstance(lowercase ,lowercase):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowercase)}""")
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : List[Any] = False
if self.does_advance(lowercase):
self.current_seq.append(lowercase)
UpperCAmelCase_ : Tuple = True
else:
UpperCAmelCase_ : Union[str, Any] = True
self.reset()
UpperCAmelCase_ : Optional[Any] = self.trie.reached_leaf(self.current_seq)
UpperCAmelCase_ : List[str] = completed
return stepped, completed, reset
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : List[str] = []
def A_ ( self):
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq)
def A_ ( self ,lowercase=False):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = DisjunctiveConstraint(self.token_ids)
if stateful:
UpperCAmelCase_ : int = self.seqlen
UpperCAmelCase_ : Optional[int] = self.current_seq
UpperCAmelCase_ : List[str] = self.completed
return new_constraint
class ConstraintListState :
    """simple docstring"""
    def __init__( self ,constraints):
        """simple docstring"""
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()
    def init_state( self):
        """simple docstring"""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Dict = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints) * self.max_seqlen) + add
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCAmelCase_ : Dict = constraint.advance()
if isinstance(lowercase ,lowercase):
token_list.append(lowercase)
elif isinstance(lowercase ,lowercase):
token_list.extend(lowercase)
else:
UpperCAmelCase_ : str = self.inprogress_constraint.advance()
if isinstance(lowercase ,lowercase):
token_list.append(lowercase)
elif isinstance(lowercase ,lowercase):
token_list.extend(lowercase)
if len(lowercase) == 0:
return None
else:
return token_list
def A_ ( self ,lowercase):
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.add(lowercase)
# the entire list of constraints are fulfilled
if self.completed:
break
def A_ ( self ,lowercase):
"""simple docstring"""
if not isinstance(lowercase ,lowercase):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""")
UpperCAmelCase_ , UpperCAmelCase_ : int = False, False
if self.completed:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.inprogress_constraint.update(lowercase)
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=lowercase))
UpperCAmelCase_ : Any = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint)
UpperCAmelCase_ : Optional[int] = None
if len(self.pending_constraints) == 0:
# we're done!
UpperCAmelCase_ : str = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints):
if pending_constraint.does_advance(lowercase):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = pending_constraint.update(lowercase)
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true.")
if complete:
self.complete_constraints.append(lowercase)
UpperCAmelCase_ : List[str] = None
if not complete and stepped:
UpperCAmelCase_ : List[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCAmelCase_ : int = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCAmelCase_ : int = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
    def copy( self ,stateful=True):
        """simple docstring"""
        new_state = ConstraintListState(self.constraints)  # we never mutate the self.constraints
        # objects throughout this process, so the copy starts from the initialization state
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
return new_state
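# Editor's note: a hedged usage sketch for the constraint classes above;
# `model` and `tokenizer` are assumed to be a generation-capable checkpoint
# loaded elsewhere, and the forced phrase is an arbitrary example.
def constrained_generate(model, tokenizer, prompt, phrase):
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    constraint = PhrasalConstraint(tokenizer(phrase, add_special_tokens=False).input_ids)
    return model.generate(input_ids, constraints=[constraint], num_beams=4)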
| 455
|
import logging
from transformers import PretrainedConfig
__lowerCamelCase = logging.getLogger(__name__)
__lowerCamelCase = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig (PretrainedConfig ):
    """simple docstring"""
    model_type = """bertabs"""
    def __init__( self ,vocab_size=30522 ,max_pos=512 ,enc_layers=6 ,enc_hidden_size=512 ,enc_heads=8 ,enc_ff_size=512 ,enc_dropout=0.2 ,dec_layers=6 ,dec_hidden_size=768 ,dec_heads=8 ,dec_ff_size=2048 ,dec_dropout=0.2 ,**kwargs ,):
        """simple docstring"""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
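# Editor's note: a minimal, commented-out instantiation sketch; the keyword
# names mirror the constructor above and the values are illustrative.
#
#   config = BertAbsConfig(vocab_size=30522, dec_layers=6, dec_heads=8)
#   assert config.model_type == "bertabs"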
| 455
| 1
|
"""simple docstring"""
def exchange_sort( numbers : list[int] ) -> list[int]:
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
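# Editor's note: a quick sanity check; exchange sort performs O(n^2)
# comparisons and sorts the list in place before returning it.
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([]) == []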
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
| 273
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module ):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
a__ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a__ = []
for i in range(len(self.block_out_channels ) - 1 ):
a__ = self.block_out_channels[i]
a__ = self.block_out_channels[i + 1]
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
a__ = blocks
a__ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
a__ = self.conv_in(__SCREAMING_SNAKE_CASE )
a__ = nn.silu(__SCREAMING_SNAKE_CASE )
for block in self.blocks:
a__ = block(__SCREAMING_SNAKE_CASE )
a__ = nn.silu(__SCREAMING_SNAKE_CASE )
a__ = self.conv_out(__SCREAMING_SNAKE_CASE )
return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights( self , rng ) -> FrozenDict:
        """simple docstring"""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
def lowercase__ ( self ) -> str:
"""simple docstring"""
a__ = self.block_out_channels
a__ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a__ = self.num_attention_heads or self.attention_head_dim
# input
a__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
a__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
a__ = FlaxTimestepEmbedding(__SCREAMING_SNAKE_CASE , dtype=self.dtype )
a__ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
a__ = self.only_cross_attention
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ = (num_attention_heads,) * len(self.down_block_types )
# down
a__ = []
a__ = []
a__ = block_out_channels[0]
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
for i, down_block_type in enumerate(self.down_block_types ):
a__ = output_channel
a__ = block_out_channels[i]
a__ = i == len(__SCREAMING_SNAKE_CASE ) - 1
if down_block_type == "CrossAttnDownBlock2D":
                a__ = FlaxCrossAttnDownBlock2D(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
                a__ = FlaxDownBlock2D(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__SCREAMING_SNAKE_CASE )
for _ in range(self.layers_per_block ):
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
if not is_final_block:
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
a__ = down_blocks
a__ = controlnet_down_blocks
# mid
a__ = block_out_channels[-1]
        a__ = FlaxUNetMidBlock2DCrossAttn(
in_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale = 1.0 , return_dict = True , train = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        """simple docstring"""
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
sample += controlnet_cond
# 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
# 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
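# Editor's note: a hedged, commented-out initialisation sketch mirroring the
# dummy inputs built by `init_weights` above; runs on CPU but allocates real
# parameters.
#
#   import jax
#   controlnet = FlaxControlNetModel(sample_size=32)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))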
| 273
| 1
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1_280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if the height or width is lower than this scale, drop the box
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image , new_annos , path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        file_root = F'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cv2.imwrite(F'{file_root}.jpg' , new_image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(F'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F'{anno[0]} {x_center} {y_center} {width} {height}'
            annos_list.append(obj )
        with open(F'{file_root}.txt' , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset( label_dir: str , img_dir: str ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F'{label_name}.jpg' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno( all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
if i == 0: # top-left
__lowercase : Optional[Any] = cva.resize(SCREAMING_SNAKE_CASE_ , (divid_point_x, divid_point_y) )
__lowercase : int = img
for bbox in img_annos:
__lowercase : Union[str, Any] = bbox[1] * scale_x
__lowercase : List[str] = bbox[2] * scale_y
__lowercase : Union[str, Any] = bbox[3] * scale_x
__lowercase : Tuple = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__lowercase : Tuple = cva.resize(SCREAMING_SNAKE_CASE_ , (output_size[1] - divid_point_x, divid_point_y) )
__lowercase : Tuple = img
for bbox in img_annos:
__lowercase : Any = scale_x + bbox[1] * (1 - scale_x)
__lowercase : Optional[Any] = bbox[2] * scale_y
__lowercase : Optional[int] = scale_x + bbox[3] * (1 - scale_x)
__lowercase : Dict = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__lowercase : List[str] = cva.resize(SCREAMING_SNAKE_CASE_ , (divid_point_x, output_size[0] - divid_point_y) )
__lowercase : int = img
for bbox in img_annos:
__lowercase : Union[str, Any] = bbox[1] * scale_x
__lowercase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
__lowercase : List[Any] = bbox[3] * scale_x
__lowercase : Union[str, Any] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__lowercase : Optional[Any] = cva.resize(
SCREAMING_SNAKE_CASE_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__lowercase : int = img
for bbox in img_annos:
__lowercase : int = scale_x + bbox[1] * (1 - scale_x)
__lowercase : List[Any] = scale_y + bbox[2] * (1 - scale_y)
__lowercase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__lowercase : List[Any] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
__lowercase : Any = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def UpperCAmelCase_ ( __lowerCAmelCase ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
__lowercase : Tuple = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 702
|
from math import factorial
__lowerCAmelCase : Dict = {str(d): factorial(d) for d in range(10)}
def UpperCAmelCase_ ( __lowerCAmelCase ) -> int:
return sum(DIGIT_FACTORIAL[d] for d in str(__lowerCAmelCase ) )
def UpperCAmelCase_ ( ) -> int:
__lowercase : int = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , __lowerCAmelCase ) if sum_of_digit_factorial(__lowerCAmelCase ) == i )
if __name__ == "__main__":
print(F'{solution() = }')
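# NOTE (added check): 145 is one of the two curious numbers the problem asks
# for, since 1! + 4! + 5! = 1 + 24 + 120 = 145. The search bound 7 * 9! + 1
# works because an 8-digit number's digit factorials sum to at most
# 8 * 9! = 2,903,040, which has only 7 digits.
from math import factorial

assert sum(factorial(int(d)) for d in "145") == 145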
| 284
| 0
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A_ :
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case=True , snake_case=False , snake_case=False , snake_case=False , ):
lowercase = 4
lowercase = 32
lowercase = (32, 32)
lowercase = torch.manual_seed(0 )
lowercase = torch.device(snake_case )
lowercase = (batch_size, num_channels) + sizes
lowercase = randn_tensor(snake_case , generator=snake_case , device=snake_case )
lowercase = {'hidden_states': hidden_states}
if include_temb:
lowercase = 128
lowercase = randn_tensor((batch_size, temb_channels) , generator=snake_case , device=snake_case )
if include_res_hidden_states_tuple:
lowercase = torch.manual_seed(1 )
lowercase = (randn_tensor(snake_case , generator=snake_case , device=snake_case ),)
if include_encoder_hidden_states:
lowercase = floats_tensor((batch_size, 32, 32) ).to(snake_case )
if include_skip_sample:
lowercase = randn_tensor(((batch_size, 3) + sizes) , generator=snake_case , device=snake_case )
return dummy_input
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
lowercase = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
lowercase = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase , lowercase = self.prepare_init_args_and_inputs_for_common()
lowercase = self.block_class(**snake_case )
unet_block.to(snake_case )
unet_block.eval()
with torch.no_grad():
lowercase = unet_block(**snake_case )
if isinstance(snake_case , snake_case ):
lowercase = output[0]
self.assertEqual(output.shape , self.output_shape )
lowercase = output[0, -1, -3:, -3:]
lowercase = torch.tensor(snake_case ).to(snake_case )
assert torch_all_close(output_slice.flatten() , snake_case , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.prepare_init_args_and_inputs_for_common()
lowercase = self.block_class(**snake_case )
model.to(snake_case )
model.train()
lowercase = model(**snake_case )
if isinstance(snake_case , snake_case ):
lowercase = output[0]
lowercase = torch.device(snake_case )
lowercase = randn_tensor(output.shape , device=snake_case )
lowercase = torch.nn.functional.mse_loss(snake_case , snake_case )
loss.backward()
| 84
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
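# NOTE (added sketch): `_LazyModule` defers the heavy torch/flax imports until
# an attribute is first accessed. The same idea in minimal form as a module
# file with a module-level __getattr__ (PEP 562); `_import_structure` below is
# a stand-in mapping and `json` merely plays the role of an expensive submodule.
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")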
| 84
| 1
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def _lowercase ( UpperCamelCase__ : np.ndarray, UpperCamelCase__ : tuple[int, int], UpperCamelCase__ : tuple[int, int], UpperCamelCase__ : bool, ):
__A : Optional[Any] = grid.shape
__A : List[Any] = [-1, 1, 0, 0]
__A : Optional[int] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__A : int = [(0, source)], set()
__A : Any = np.full((rows, cols), np.inf )
__A : int = 0
__A : Any = np.empty((rows, cols), dtype=UpperCamelCase__ )
__A : List[Any] = None
while queue:
(__A) : List[Any] = heappop(UpperCamelCase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__A : Tuple = []
while (x, y) != source:
path.append((x, y) )
__A : int = predecessors[x, y]
path.append(UpperCamelCase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCamelCase__ ) ):
__A : List[Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__A : str = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCamelCase__, (dist + 1, (nx, ny)) )
__A : int = dist + 1
__A : List[str] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
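# NOTE (added sketch): the routine above is Dijkstra on a grid where passable
# cells are marked 1 and every move costs 1 (so it degenerates to BFS). A
# compact self-contained version of the same search on a tiny hand-made grid:
import heapq
import numpy as np

def shortest_distance(grid, source, destination):
    rows, cols = grid.shape
    dist = {source: 0}
    queue = [(0, source)]
    while queue:
        d, (x, y) = heapq.heappop(queue)
        if (x, y) == destination:
            return d
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx, ny] == 1:
                if d + 1 < dist.get((nx, ny), float("inf")):
                    dist[(nx, ny)] = d + 1
                    heapq.heappush(queue, (d + 1, (nx, ny)))
    return float("inf")

demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])  # 1 = passable
assert shortest_distance(demo_grid, (0, 0), (2, 0)) == 6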
| 718
|
'''simple docstring'''
from collections.abc import Sequence
def _lowercase ( UpperCamelCase__ : Sequence[float], UpperCamelCase__ : float ):
return sum(c * (x**i) for i, c in enumerate(UpperCamelCase__ ) )
def _lowercase ( UpperCamelCase__ : Sequence[float], UpperCamelCase__ : float ):
__A : Optional[Any] = 0.0
for coeff in reversed(UpperCamelCase__ ):
__A : List[str] = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase_ : Any = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : Any = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
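# NOTE (added check): both evaluators agree; Horner's rule just replaces the
# explicit x**i powers with one multiply-add per coefficient. A cross-check
# with NumPy (np.polyval expects highest-degree coefficients first, hence [::-1]):
import numpy as np

poly, x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
assert np.isclose(np.polyval(poly[::-1], x), 79_800.0)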
| 540
| 0
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] ={
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = """align_text_model"""
def __init__( self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = hidden_act
SCREAMING_SNAKE_CASE_ : int = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE_ : Dict = use_cache
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pad_token_id
@classmethod
def UpperCamelCase__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
SCREAMING_SNAKE_CASE_ : str = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = """align_vision_model"""
def __init__( self , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 6_0_0 , lowerCAmelCase__ = 2.0 , lowerCAmelCase__ = 3.1 , lowerCAmelCase__ = 8 , lowerCAmelCase__ = [3, 3, 5, 3, 5, 5, 3] , lowerCAmelCase__ = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , lowerCAmelCase__ = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , lowerCAmelCase__ = [] , lowerCAmelCase__ = [1, 2, 2, 2, 1, 2, 1] , lowerCAmelCase__ = [1, 2, 2, 3, 3, 4, 1] , lowerCAmelCase__ = [1, 6, 6, 6, 6, 6, 6] , lowerCAmelCase__ = 0.25 , lowerCAmelCase__ = "swish" , lowerCAmelCase__ = 2_5_6_0 , lowerCAmelCase__ = "mean" , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 0.001 , lowerCAmelCase__ = 0.99 , lowerCAmelCase__ = 0.2 , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : int = width_coefficient
SCREAMING_SNAKE_CASE_ : int = depth_coefficient
SCREAMING_SNAKE_CASE_ : Optional[int] = depth_divisor
SCREAMING_SNAKE_CASE_ : int = kernel_sizes
SCREAMING_SNAKE_CASE_ : Optional[int] = in_channels
SCREAMING_SNAKE_CASE_ : Dict = out_channels
SCREAMING_SNAKE_CASE_ : Optional[Any] = depthwise_padding
SCREAMING_SNAKE_CASE_ : List[Any] = strides
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_block_repeats
SCREAMING_SNAKE_CASE_ : int = expand_ratios
SCREAMING_SNAKE_CASE_ : Union[str, Any] = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Tuple = hidden_dim
SCREAMING_SNAKE_CASE_ : int = pooling_type
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : int = batch_norm_eps
SCREAMING_SNAKE_CASE_ : int = batch_norm_momentum
SCREAMING_SNAKE_CASE_ : Tuple = drop_connect_rate
SCREAMING_SNAKE_CASE_ : List[str] = sum(lowerCAmelCase__ ) * 4
@classmethod
def UpperCamelCase__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
SCREAMING_SNAKE_CASE_ : Optional[Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = """align"""
_UpperCAmelCase = True
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=6_4_0 , lowerCAmelCase__=1.0 , lowerCAmelCase__=0.02 , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
if text_config is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
SCREAMING_SNAKE_CASE_ : List[Any] = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
SCREAMING_SNAKE_CASE_ : Tuple = AlignTextConfig(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = AlignVisionConfig(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = projection_dim
SCREAMING_SNAKE_CASE_ : str = temperature_init_value
SCREAMING_SNAKE_CASE_ : int = initializer_range
@classmethod
def UpperCamelCase__ ( cls , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ : Any = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.__class__.model_type
return output
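# NOTE (added sketch): the three classes above follow the usual composite-config
# pattern in transformers. Assuming a release that ships ALIGN (>= 4.28) and the
# public name `from_text_vision_configs` for the classmethod the snippet
# obfuscates, composing a full config looks roughly like this:
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_config = AlignTextConfig(num_hidden_layers=6)
vision_config = AlignVisionConfig(image_size=600)
config = AlignConfig.from_text_vision_configs(text_config, vision_config)

assert config.to_dict()["text_config"]["num_hidden_layers"] == 6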
| 101
|
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a : Optional[int] = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = R"\w+[.]\d+"
UpperCAmelCase : Dict = re.findall(__magic_name__ , __magic_name__ )
for pat in pats:
UpperCAmelCase : Tuple = key.replace(__magic_name__ , "_".join(pat.split("." ) ) )
return key
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCAmelCase : Optional[int] = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCAmelCase : Dict = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCAmelCase : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase : int = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
UpperCAmelCase : Union[str, Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase : Optional[int] = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=42 ):
'''simple docstring'''
UpperCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Since the model is stateless, get random Flax params
UpperCAmelCase : Tuple = flax_model.init_weights(PRNGKey(__magic_name__ ) )
UpperCAmelCase : Optional[Any] = flatten_dict(__magic_name__ )
UpperCAmelCase : List[str] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase : Tuple = rename_key(__magic_name__ )
UpperCAmelCase : List[str] = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
UpperCAmelCase , UpperCAmelCase : Optional[int] = rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
UpperCAmelCase : Optional[int] = jnp.asarray(__magic_name__ )
return unflatten_dict(__magic_name__ )
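# NOTE (added demo): `rename_key` collapses PyTorch's dotted list indices
# ("layers.0") into the underscore form Flax modules use ("layers_0"); the
# rest of the conversion is per-tensor renaming and reshaping. The regex in
# isolation, on a typical diffusers-style key:
import re

key = "down_blocks.0.attentions.1.proj.weight"
for pat in re.findall(r"\w+[.]\d+", key):
    key = key.replace(pat, "_".join(pat.split(".")))

assert key == "down_blocks_0.attentions_1.proj.weight"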
| 679
| 0
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=0.0 ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = "geglu" ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = "layer_norm" ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
super().__init__()
snake_case : Any = only_cross_attention
snake_case : Tuple = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
snake_case : str = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case : Optional[Any] = AdaLayerNorm(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
elif self.use_ada_layer_norm_zero:
snake_case : Tuple = AdaLayerNormZero(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
else:
snake_case : Dict = nn.LayerNorm(SCREAMING_SNAKE_CASE_ ,elementwise_affine=SCREAMING_SNAKE_CASE_ )
snake_case : str = Attention(
query_dim=SCREAMING_SNAKE_CASE_ ,heads=SCREAMING_SNAKE_CASE_ ,dim_head=SCREAMING_SNAKE_CASE_ ,dropout=SCREAMING_SNAKE_CASE_ ,bias=SCREAMING_SNAKE_CASE_ ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=SCREAMING_SNAKE_CASE_ ,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
snake_case : List[str] = (
AdaLayerNorm(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
if self.use_ada_layer_norm
else nn.LayerNorm(SCREAMING_SNAKE_CASE_ ,elementwise_affine=SCREAMING_SNAKE_CASE_ )
)
snake_case : List[Any] = Attention(
query_dim=SCREAMING_SNAKE_CASE_ ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=SCREAMING_SNAKE_CASE_ ,dim_head=SCREAMING_SNAKE_CASE_ ,dropout=SCREAMING_SNAKE_CASE_ ,bias=SCREAMING_SNAKE_CASE_ ,upcast_attention=SCREAMING_SNAKE_CASE_ ,) # is self-attn if encoder_hidden_states is none
else:
snake_case : Tuple = None
snake_case : Union[str, Any] = None
# 3. Feed-forward
snake_case : Union[str, Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE_ ,elementwise_affine=SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = FeedForward(SCREAMING_SNAKE_CASE_ ,dropout=SCREAMING_SNAKE_CASE_ ,activation_fn=SCREAMING_SNAKE_CASE_ ,final_dropout=SCREAMING_SNAKE_CASE_ )
# let chunk size default to None
snake_case : Tuple = None
snake_case : Optional[Any] = 0
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[Any] = chunk_size
snake_case : Dict = dim
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
snake_case : str = self.norma(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
elif self.use_ada_layer_norm_zero:
snake_case : List[str] = self.norma(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,hidden_dtype=hidden_states.dtype )
else:
snake_case : Union[str, Any] = self.norma(SCREAMING_SNAKE_CASE_ )
snake_case : str = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case : Optional[Any] = self.attna(
SCREAMING_SNAKE_CASE_ ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,)
if self.use_ada_layer_norm_zero:
snake_case : Optional[Any] = gate_msa.unsqueeze(1 ) * attn_output
snake_case : Optional[int] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case : Union[str, Any] = (
self.norma(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) if self.use_ada_layer_norm else self.norma(SCREAMING_SNAKE_CASE_ )
)
snake_case : List[str] = self.attna(
SCREAMING_SNAKE_CASE_ ,encoder_hidden_states=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,)
snake_case : Optional[int] = attn_output + hidden_states
# 3. Feed-forward
snake_case : Tuple = self.norma(SCREAMING_SNAKE_CASE_ )
if self.use_ada_layer_norm_zero:
snake_case : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
snake_case : List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case : Tuple = torch.cat(
[self.ff(SCREAMING_SNAKE_CASE_ ) for hid_slice in norm_hidden_states.chunk(SCREAMING_SNAKE_CASE_ ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
else:
snake_case : List[str] = self.ff(SCREAMING_SNAKE_CASE_ )
if self.use_ada_layer_norm_zero:
snake_case : str = gate_mlp.unsqueeze(1 ) * ff_output
snake_case : str = ff_output + hidden_states
return hidden_states
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = 4 ,SCREAMING_SNAKE_CASE_ = 0.0 ,SCREAMING_SNAKE_CASE_ = "geglu" ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
super().__init__()
snake_case : Optional[int] = int(dim * mult )
snake_case : Union[str, Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case : Optional[int] = GELU(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
if activation_fn == "gelu-approximate":
snake_case : Dict = GELU(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,approximate="""tanh""" )
elif activation_fn == "geglu":
snake_case : Optional[int] = GEGLU(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
elif activation_fn == "geglu-approximate":
snake_case : List[str] = ApproximateGELU(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = nn.ModuleList([] )
# project in
self.net.append(SCREAMING_SNAKE_CASE_ )
# project dropout
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE_ ) )
# project out
self.net.append(nn.Linear(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for module in self.net:
snake_case : Any = module(SCREAMING_SNAKE_CASE_ )
return hidden_states
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "none" ):
'''simple docstring'''
super().__init__()
snake_case : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = approximate
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE_ ,approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[Any] = self.proj(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = self.gelu(SCREAMING_SNAKE_CASE_ )
return hidden_states
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__()
snake_case : Tuple = nn.Linear(SCREAMING_SNAKE_CASE_ ,dim_out * 2 )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(SCREAMING_SNAKE_CASE_ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[Any] = self.proj(SCREAMING_SNAKE_CASE_ ).chunk(2 ,dim=-1 )
return hidden_states * self.gelu(SCREAMING_SNAKE_CASE_ )
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__()
snake_case : int = nn.Linear(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[Any] = self.proj(SCREAMING_SNAKE_CASE_ )
return x * torch.sigmoid(1.7_02 * x )
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__()
snake_case : Optional[int] = nn.Embedding(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : int = nn.SiLU()
snake_case : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE_ ,embedding_dim * 2 )
snake_case : int = nn.LayerNorm(SCREAMING_SNAKE_CASE_ ,elementwise_affine=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE_ ) ) )
snake_case : Union[str, Any] = torch.chunk(SCREAMING_SNAKE_CASE_ ,2 )
snake_case : Union[str, Any] = self.norm(SCREAMING_SNAKE_CASE_ ) * (1 + scale) + shift
return x
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__()
snake_case : Tuple = CombinedTimestepLabelEmbeddings(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = nn.SiLU()
snake_case : Tuple = nn.Linear(SCREAMING_SNAKE_CASE_ ,6 * embedding_dim ,bias=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = nn.LayerNorm(SCREAMING_SNAKE_CASE_ ,elementwise_affine=SCREAMING_SNAKE_CASE_ ,eps=1E-6 )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
snake_case : int = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,hidden_dtype=SCREAMING_SNAKE_CASE_ ) ) )
snake_case : Any = emb.chunk(6 ,dim=1 )
snake_case : Any = self.norm(SCREAMING_SNAKE_CASE_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _A ( nn.Module ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = 1E-5 ):
'''simple docstring'''
super().__init__()
snake_case : Any = num_groups
snake_case : Tuple = eps
if act_fn is None:
snake_case : int = None
else:
snake_case : List[str] = get_activation(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE_ ,out_dim * 2 )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if self.act:
snake_case : List[Any] = self.act(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = self.linear(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = emb[:, :, None, None]
snake_case : List[Any] = emb.chunk(2 ,dim=1 )
snake_case : Optional[int] = F.group_norm(SCREAMING_SNAKE_CASE_ ,self.num_groups ,eps=self.eps )
snake_case : List[str] = x * (1 + scale) + shift
return x
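# NOTE (added sketch): several modules above share one trick -- project to
# twice the target width, split in half, and let one half gate the other
# (GEGLU), or regress per-channel scale/shift pairs from an embedding
# (AdaLayerNorm, AdaGroupNorm). The GEGLU gating in isolation:
import torch
import torch.nn.functional as F

proj = torch.nn.Linear(16, 8 * 2)  # project to twice the output width
hidden, gate = proj(torch.randn(2, 16)).chunk(2, dim=-1)
out = hidden * F.gelu(gate)  # one half gates the other

assert out.shape == (2, 8)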
| 721
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _A ( snake_case , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = TextToVideoSDPipeline
__lowerCamelCase : Dict = TEXT_TO_IMAGE_PARAMS
__lowerCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__lowerCamelCase : str = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
snake_case : int = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule="""scaled_linear""" ,clip_sample=SCREAMING_SNAKE_CASE_ ,set_alpha_to_one=SCREAMING_SNAKE_CASE_ ,)
torch.manual_seed(0 )
snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
snake_case : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
snake_case : List[str] = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
snake_case : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=0 ):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
snake_case : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
snake_case : int = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
snake_case : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : str = self.get_dummy_components()
snake_case : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE_ )
snake_case : str = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = """np"""
snake_case : List[str] = sd_pipe(**SCREAMING_SNAKE_CASE_ ).frames
snake_case : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
snake_case : Optional[Any] = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ ,expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def snake_case_ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ ,expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
snake_case : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
snake_case : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
snake_case : List[str] = pipe.to("""cuda""" )
snake_case : Optional[Any] = """Spiderman is surfing"""
snake_case : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : str = pipe(SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,num_inference_steps=25 ,output_type="""pt""" ).frames
snake_case : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
snake_case : str = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
snake_case : Dict = pipe.to("""cuda""" )
snake_case : Tuple = """Spiderman is surfing"""
snake_case : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Any = pipe(SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,num_inference_steps=2 ,output_type="""pt""" ).frames
snake_case : Union[str, Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 315
| 0
|
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (KDPMaDiscreteScheduler,)
SCREAMING_SNAKE_CASE_ = 10
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = {
'num_train_timesteps': 1100,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ = sample.to(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ = sample.to(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def UpperCamelCase( self ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if str(SCREAMING_SNAKE_CASE_ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
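# NOTE (added sketch): every test above runs the same denoising loop that all
# diffusers schedulers share. The generic pattern, using the scheduler's public
# name (KDPM2DiscreteScheduler) and a zero-returning stand-in for the model:
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
model = lambda x, t: torch.zeros_like(x)  # stand-in noise predictor

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample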
| 42
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
UpperCAmelCase_ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _A (__a ) -> str:
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE_ : List[str] = model_type_to_module_name(__a )
SCREAMING_SNAKE_CASE_ : Dict = importlib.import_module(f'.{module_name}' , '''transformers.models''' )
try:
return getattr(__a , __a )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__a , '''__name__''' , __a ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module('''transformers''' )
if hasattr(__a , __a ):
return getattr(__a , __a )
return None
def _A (__a , __a = None , __a = False , __a = False , __a = None , __a = None , __a = None , __a = False , **__a , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_file_from_repo(
__a , __a , cache_dir=__a , force_download=__a , resume_download=__a , proxies=__a , use_auth_token=__a , revision=__a , local_files_only=__a , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(__a , encoding='''utf-8''' ) as reader:
return json.load(__a )
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : int):
'''simple docstring'''
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''')
@classmethod
@replace_list_option_in_docstrings(lowercase_)
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowercase_ : List[Any] , **lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop('''config''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''trust_remote_code''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = True
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = FeatureExtractionMixin.get_feature_extractor_dict(lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = config_dict.get('''feature_extractor_type''' , lowercase_)
SCREAMING_SNAKE_CASE_ : str = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {}):
SCREAMING_SNAKE_CASE_ : Optional[int] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowercase_ , lowercase_):
SCREAMING_SNAKE_CASE_ : Any = AutoConfig.from_pretrained(lowercase_ , **lowercase_)
            # It could be in `config.feature_extractor_type`
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(lowercase_ , '''feature_extractor_type''' , lowercase_)
if hasattr(lowercase_ , '''auto_map''') and "AutoFeatureExtractor" in config.auto_map:
SCREAMING_SNAKE_CASE_ : Dict = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
SCREAMING_SNAKE_CASE_ : Dict = feature_extractor_class_from_name(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = feature_extractor_auto_map is not None
SCREAMING_SNAKE_CASE_ : Dict = feature_extractor_class is not None or type(lowercase_) in FEATURE_EXTRACTOR_MAPPING
SCREAMING_SNAKE_CASE_ : Optional[int] = resolve_trust_remote_code(
lowercase_ , lowercase_ , lowercase_ , lowercase_)
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE_ : Dict = get_class_from_dynamic_module(
lowercase_ , lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('''code_revision''' , lowercase_)
if os.path.isdir(lowercase_):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowercase_ , **lowercase_)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowercase_ , **lowercase_)
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowercase_) in FEATURE_EXTRACTOR_MAPPING:
SCREAMING_SNAKE_CASE_ : Any = FEATURE_EXTRACTOR_MAPPING[type(lowercase_)]
return feature_extractor_class.from_dict(lowercase_ , **lowercase_)
raise ValueError(
F'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
F'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}')
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : List[Any]):
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(lowercase_ , lowercase_)
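# NOTE (added usage sketch): in practice the resolution logic above hides
# behind a single call. The checkpoint name below is just an example; its
# "wav2vec2" model type maps to Wav2Vec2FeatureExtractor in the table at the top.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # -> Wav2Vec2FeatureExtractor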
| 512
| 0
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # noqa: E741
while r - l > 1:
lowerCAmelCase_ : int =(l + r) // 2
if v[m] >= key:
lowerCAmelCase_ : Tuple =m
else:
lowerCAmelCase_ : Optional[int] =m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
if len(_SCREAMING_SNAKE_CASE ) == 0:
return 0
lowerCAmelCase_ : Optional[int] =[0] * len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Any =1
lowerCAmelCase_ : Optional[Any] =v[0]
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
if v[i] < tail[0]:
lowerCAmelCase_ : List[Any] =v[i]
elif v[i] > tail[length - 1]:
lowerCAmelCase_ : List[Any] =v[i]
length += 1
else:
lowerCAmelCase_ : Tuple =v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''MobileNetV2FeatureExtractor''']
__lowercase = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 305
| 1
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Pure-Python SHA-256: hashes `data` on construction; the hex digest is in `self.hash`."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with a single 1 bit, zeros, then the 64-bit big-endian bit length
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit unsigned value."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Checks the pure-Python implementation against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hashes a string from -s/--string or the contents of a file from -f/--file."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
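# Quick sanity check (illustrative, mirrors the unit test above):
#
#     >>> import hashlib
#     >>> SHA256(b"Test String").hash == hashlib.sha256(b"Test String").hexdigest()
#     True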
| 388
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Computes the rising product u, u*(u-1), u*(u-1)*(u-2), ... for the p-th term."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
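# Worked example (illustrative): with u = 1.5, ucal(1.5, 3) computes
# u * (u - 1) * (u - 2) = 1.5 * 0.5 * (-0.5) = -0.375, the rising-product term
# used in Newton's forward-difference formula  f(x) ≈ Σ_k ucal(u, k) Δ^k y₀ / k!.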
| 388
| 1
|
def generate_large_matrix() -> list[list[int]]:
    """Generates a 1000x1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1_0_0_0 - i, -1_0_0_0 - i, -1)) for i in range(1_0_0_0)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validates that rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Finds the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """An O(m log n) solution that uses binary search to find the boundary between
    non-negative and negative numbers in each row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """An O(n^2) solution that iterates through every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Like the brute force above, but breaks out of a row at its first negative."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=5_0_0)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
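# Illustrative call (not in the original file): for the test grid
# [[7, 7, 6], [-1, -2, -3]] all three counting strategies above agree and
# return 3, since only the second row contains negatives.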
| 716
|
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set first element as head
    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
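# Expected console output for the demo above (illustrative):
#
#     Linked List:
#     14->52->14->12->43
#     Elements in Reverse:
#     43
#     12
#     14
#     52
#     14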
| 256
| 0
|
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Returns the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
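# For reference: the answer to Project Euler problem 8 (greatest product of
# thirteen adjacent digits in the series above) is widely reported as 23514624000.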
| 488
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
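# Typical invocation (illustrative):
#
#     $ diffusers-cli env
#
# which dispatches to EnvironmentCommand and prints environment diagnostics.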
| 434
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )

            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
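# Illustrative usage (not part of this module):
#
#     config = DPTConfig()                # plain ViT backbone
#     hybrid = DPTConfig(is_hybrid=True)  # auto-creates a default BiT backbone config
#     assert hybrid.backbone_config is not None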
| 701
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Loads an ONNX inference session; defaults to `CPUExecutionProvider`."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
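# Illustrative usage (not part of this module; the repo id, input name and
# shape below are placeholders):
#
#     model = OnnxRuntimeModel.from_pretrained("some/onnx-repo", provider="CPUExecutionProvider")
#     outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))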
| 31
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
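# Illustrative usage (not part of this module):
#
#     config = ASTConfig(frequency_stride=10, time_stride=10)
#
# The patch grid an AST model sees follows from max_length, num_mel_bins,
# patch_size and the two strides above.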
| 26
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
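# Typical usage (illustrative; the script normally lives in utils/ and is run
# from the repository root):
#
#     python utils/release.py                 # prepare a minor release
#     python utils/release.py --patch         # prepare a patch release
#     python utils/release.py --post_release  # move back to a .dev0 version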
| 26
| 1
|
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Calculates the Hubble parameter H(z) from the density parameters and redshift."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
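# The relation implemented above is the Friedmann equation
#     H(z) = H0 * sqrt(Ω_r (1+z)^4 + Ω_m (1+z)^3 + Ω_k (1+z)^2 + Ω_Λ),
# with curvature Ω_k = 1 - (Ω_m + Ω_r + Ω_Λ). For the demo values the densities
# plus curvature sum to 1, so E(0) = 1 and the printed value is exactly H0 = 68.3.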
| 625
|
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Returns the relative distance (= step / max_step) after which the complex
    number constituted by this x-y-pair diverges."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black if the point belongs to the Mandelbrot set, white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; otherwise a hue that encodes the escape distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Renders the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
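# Note on the coloring (illustrative): get_distance returns step / (max_step - 1)
# in [0, 1]; points that never escape map to 1 and are drawn black, while finite
# escape times are mapped onto the HSV hue wheel via colorsys.hsv_to_rgb.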
| 625
| 1
|
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
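# Example invocation (illustrative; the script name and paths are placeholders):
#
#     python convert_lora_safetensor_to_diffusers.py \
#         --base_model_path runwayml/stable-diffusion-v1-5 \
#         --checkpoint_path lora_weights.safetensors \
#         --dump_path ./merged-pipeline \
#         --alpha 0.75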
| 265
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder for UniDiffuser: projects (CLIP) text embeddings to a prefix that
    conditions a GPT-2 language modeling head, and decodes captions via beam search.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50_257,  # Start of GPT2 config args
        n_positions: int = 1_024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generates captions for the given text embedding features via beam search."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Beam search decoding from either token ids or a prefix embedding."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 265
| 1
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Computes the next generation for the given grid of cells."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generates a list of images of subsequent Game of Life generations."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 2_5_5 - cells[y][x] * 2_5_5
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
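# Quick property check (illustrative): the BLINKER pattern above oscillates with
# period 2, i.e. new_generation(new_generation(BLINKER)) == BLINKER.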
| 703
|
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 20_48
MAX_LENGTH = 40_96
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)

    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -1_0_0,  # ignore index in cross-entropy
                "end_token": -1_0_0,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2_0_4_8, max_length=4_0_9_6, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-1_0_0] * len(category),
                "end_token": [-1_0_0] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -1_0_0
            end_token = -1_0_0
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2_0_4_8, max_length=4_0_9_6, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly dropping ~60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 568
| 0
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any]="" , SCREAMING_SNAKE_CASE : Union[str, Any]="train" ):
assert os.path.isdir(SCREAMING_SNAKE_CASE )
lowercase__ : str = []
lowercase__ : List[Any] = os.listdir(SCREAMING_SNAKE_CASE )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowercase__ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not os.path.isfile(SCREAMING_SNAKE_CASE ):
continue
self.documents.append(SCREAMING_SNAKE_CASE )
def __len__( self : List[Any] ):
return len(self.documents )
def __getitem__( self : Any , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Union[str, Any] = self.documents[idx]
lowercase__ : Tuple = document_path.split("/" )[-1]
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as source:
lowercase__ : Dict = source.read()
lowercase__ , lowercase__ : List[Any] = process_story(SCREAMING_SNAKE_CASE )
return document_name, story_lines, summary_lines
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = list(filter(lambda lowerCamelCase__ : len(lowerCamelCase__ ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) )
# for some unknown reason some lines miss a period, add it
lowercase__ : Any = [_add_missing_period(lowerCamelCase__ ) for line in nonempty_lines]
# gather article lines
lowercase__ : List[str] = []
lowercase__ : Tuple = deque(lowerCamelCase__ )
while True:
try:
lowercase__ : int = lines.popleft()
if element.startswith("@highlight" ):
break
story_lines.append(lowerCamelCase__ )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowercase__ : List[str] = list(filter(lambda lowerCamelCase__ : not t.startswith("@highlight" ) , lowerCamelCase__ ) )
return story_lines, summary_lines
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Union[str, Any] = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
if line.startswith("@highlight" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if len(lowerCamelCase__ ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(lowerCamelCase__ )) )
return sequence
def build_mask(sequence, pad_token_id):
    """Build the attention mask: 1 for real tokens, 0 for padding tokens."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines, then flatten each into a single token list."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    """Segment ids alternate between 0 and 1 each time a separator token is seen."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
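# Quick illustration, with 9 standing in for the separator id:
# compute_token_type_ids([[9, 4, 4, 9, 5, 9, 6]], separator_token_id=9)
# -> tensor([[0, 0, 0, 1, 1, 0, 0]])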
| 496
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no Hugging Face equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Create a bias-free linear layer whose weight aliases the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
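# Illustrative use (shapes are assumptions): the returned layer's weight aliases
# the tied embedding matrix, so it maps hidden states to vocabulary logits.
# emb = nn.Embedding(250027, 1024)
# lm_head = make_linear_from_emb(emb)  # weight shape (250027, 1024), no bias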
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq mBART checkpoint and return an equivalent HF MBart model."""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
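    # Example invocation (the script name and paths are placeholders, not from
    # the original source):
    # python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
    #     --hf_config facebook/mbart-large-cc25 --finetuned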
| 496
| 1
|
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a batch of variable-length sequences to `sequence_length` on the given side.

    A tuple `padding_value` signals 2-D entries (e.g. entity spans), a scalar
    signals 1-D entries (e.g. NER tags).
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    """Return True if `char` is an ASCII symbol or a unicode punctuation character."""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
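# Quick illustration: ASCII symbols and unicode punctuation both count.
# is_punctuation("!") -> True; is_punctuation("“") -> True; is_punctuation("a") -> False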
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Pads token-classification features, including LUKE entity inputs."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors is postponed while the labels still need padding
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 700
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
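# Illustrative use of the two classes above (a sketch, not from the original file):
# config = BeitConfig()
# onnx_config = BeitOnnxConfig(config)
# list(onnx_config.inputs)  # ['pixel_values']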
| 24
| 0
|
"""simple docstring"""
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        return self.size

    def is_empty(self):
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
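# Minimal usage sketch of the circular queue above:
# q = CircularQueue(3)
# q.enqueue(1).enqueue(2)
# len(q)       # 2
# q.first()    # 1
# q.dequeue()  # 1 (front advances, size drops to 1)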
| 260
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Turns each candidate label into an NLI premise/hypothesis pair with the sequence."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """NLI-based zero-shot sequence classification pipeline."""

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """Tokenize the premise/hypothesis pairs for the NLI model."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
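# Illustrative use through the high-level factory (the model name is an
# assumption; any NLI model with an "entailment" label works):
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier("one day I will see the world", candidate_labels=["travel", "cooking"])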
| 260
| 1
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
def __init__( self , **lowerCAmelCase__ ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
"""simple docstring"""
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 512
|
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_lowerCamelCase : str = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            return list(s)

        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_lowerCamelCase : int = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_lowerCamelCase : Tuple = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_lowerCamelCase : Optional[int] = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
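# Hand-checked example of the formula above: reference "abc" vs prediction "axc"
# has 1 substitution, 0 deletions, 0 insertions and 2 hits, so
# CER = (1 + 0 + 0) / (1 + 0 + 2) = 1/3.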
| 512
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 49
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 702
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 22
| 0
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
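# Illustrative round trip (the model name and variables are assumptions):
# processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
# inputs = processor(audio=raw_audio, sampling_rate=16000, return_tensors="pt")
# transcription = processor.batch_decode(predicted_ids)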
| 339
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict ={"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 136
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250
def main():
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cv2.imwrite(f'{file_root}.jpg', new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}')
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'{anno[0]} {x_center} {y_center} {width} {height}'
            annos_list.append(obj)
        with open(f'{file_root}.txt', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char):
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 565
|
def check_cycle(graph):
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph, vertex, visited, rec_stk):
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
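# Quick illustration on adjacency dicts:
# check_cycle({0: [1], 1: [2], 2: []})   -> False
# check_cycle({0: [1], 1: [2], 2: [0]})  -> True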
if __name__ == "__main__":
from doctest import testmod
testmod()
| 565
| 1
|
'''simple docstring'''
def decimal_to_binary(num):
    """Convert an integer to its binary string representation."""
    if isinstance(num, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if isinstance(num, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 561
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(
    datasets,
    probabilities=None,
    seed=None,
    info=None,
    split=None,
    stopping_strategy="first_exhausted",
):
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(dsets, info=None, split=None, axis=0):
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
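# Illustrative use (the toy datasets are assumptions):
# from datasets import Dataset
# d1 = Dataset.from_dict({"a": [0, 1]})
# d2 = Dataset.from_dict({"a": [10, 11]})
# interleave_datasets([d1, d2])   # alternates rows from d1 and d2
# concatenate_datasets([d1, d2])  # stacks d2 after d1 along axis 0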
| 194
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
    _snake_case = StableDiffusionInstructPix2PixPipeline
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
_snake_case = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
_snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
        snake_case : int = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
snake_case : Any = PNDMScheduler(skip_prk_steps=A )
torch.manual_seed(0 )
snake_case : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case : int = CLIPTextModel(A )
snake_case : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase ( self , A , A=0 ) -> Optional[int]:
snake_case : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case : Any = Image.fromarray(np.uint8(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
snake_case : Any = torch.manual_seed(A )
else:
snake_case : List[Any] = torch.Generator(device=A ).manual_seed(A )
snake_case : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : List[Any] = self.get_dummy_components()
        snake_case : Optional[int] = StableDiffusionInstructPix2PixPipeline(**A )
snake_case : Union[str, Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Optional[Any] = self.get_dummy_inputs(A )
snake_case : str = sd_pipe(**A ).images
snake_case : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case : Dict = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Union[str, Any] = self.get_dummy_components()
        snake_case : Union[str, Any] = StableDiffusionInstructPix2PixPipeline(**A )
snake_case : Dict = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Optional[Any] = self.get_dummy_inputs(A )
snake_case : Dict = """french fries"""
snake_case : Optional[Any] = sd_pipe(**A , negative_prompt=A )
snake_case : Dict = output.images
snake_case : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case : List[Any] = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : str = self.get_dummy_components()
        snake_case : Dict = StableDiffusionInstructPix2PixPipeline(**A )
snake_case : List[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Dict = self.get_dummy_inputs(A )
snake_case : Dict = [inputs["""prompt"""]] * 2
        snake_case : int = np.array(inputs["""image"""] ).astype(np.float32 ) / 255.0
snake_case : str = torch.from_numpy(A ).unsqueeze(0 ).to(A )
snake_case : Union[str, Any] = image / 2 + 0.5
snake_case : int = image.permute(0 , 3 , 1 , 2 )
snake_case : Optional[Any] = image.repeat(2 , 1 , 1 , 1 )
snake_case : Dict = sd_pipe(**A ).images
snake_case : str = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
snake_case : List[str] = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : str = self.get_dummy_components()
snake_case : Any = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" )
        snake_case : Dict = StableDiffusionInstructPix2PixPipeline(**A )
snake_case : List[str] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Optional[int] = self.get_dummy_inputs(A )
snake_case : List[str] = sd_pipe(**A ).images
snake_case : Dict = image[0, -3:, -3:, -1]
snake_case : int = [round(A , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(A ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
snake_case : Tuple = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase ( self ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Optional[Any] = self.get_dummy_components()
        snake_case : Optional[int] = StableDiffusionInstructPix2PixPipeline(**A )
snake_case : Tuple = VaeImageProcessor(do_resize=A , do_normalize=A )
snake_case : Union[str, Any] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
snake_case : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(A , input_image_type="""pt""" ) )[0]
snake_case : Dict = components["""vae"""]
snake_case : Optional[Any] = self.get_dummy_inputs_by_type(A , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case : Any = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case : Tuple = pipe(**A )[0]
snake_case : str = np.abs(out - out_latents_inputs ).max()
self.assertLess(A , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self , A=0 ) -> Optional[Any]:
snake_case : List[str] = torch.manual_seed(A )
snake_case : int = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
snake_case : Union[str, Any] = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Tuple:
        snake_case : Any = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
snake_case : Dict = self.get_inputs()
snake_case : Optional[Any] = pipe(**A ).images
snake_case : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case : Union[str, Any] = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase ( self ) -> Optional[int]:
        snake_case : Tuple = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=A )
snake_case : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
snake_case : List[str] = self.get_inputs()
snake_case : List[Any] = pipe(**A ).images
snake_case : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case : str = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase ( self ) -> str:
        snake_case : str = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=A )
snake_case : Dict = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
snake_case : Union[str, Any] = self.get_inputs()
snake_case : str = pipe(**A ).images
snake_case : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case : List[Any] = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase ( self ) -> str:
snake_case : Any = 0
def callback_fn(A , A , A ) -> None:
snake_case : Tuple = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case : Union[str, Any] = latents[0, -3:, -3:, -1]
snake_case : int = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
snake_case : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case : List[str] = latents[0, -3:, -3:, -1]
snake_case : Tuple = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
snake_case : Optional[Any] = False
        snake_case : Tuple = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=A , torch_dtype=torch.float16 )
snake_case : Any = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
snake_case : str = self.get_inputs()
pipe(**A , callback=A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase ( self ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        snake_case : Optional[Any] = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=A , torch_dtype=torch.float16 )
snake_case : List[str] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case : Optional[int] = self.get_inputs()
snake_case : Dict = pipe(**A )
snake_case : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def UpperCAmelCase ( self ) -> Any:
snake_case : List[str] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case : List[str] = inputs["""image"""].resize((5_0_4, 5_0_4) )
snake_case : Optional[Any] = """timbrooks/instruct-pix2pix"""
        snake_case : Dict = StableDiffusionInstructPix2PixPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
snake_case : int = pipe(**A )
snake_case : Dict = output.images[0]
snake_case : Any = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
snake_case : Tuple = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 684
|
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum


if __name__ == "__main__":
    print(solution())
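# Added cross-check (illustrative, not part of the original solution): the same
# search written once over the four direction vectors instead of four unrolled loops.
def max_product_in_grid(grid, k=4):
    n = len(grid)
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, diagonal, anti-diagonal
        for i in range(n):
            for j in range(n):
                if 0 <= i + (k - 1) * di < n and 0 <= j + (k - 1) * dj < n:
                    product = 1
                    for step in range(k):
                        product *= grid[i + step * di][j + step * dj]
                    best = max(best, product)
    return best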
| 684
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=4, norm_num_groups=32, sample_size=32, scaling_factor=0.18215):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn)
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x, return_dict=True):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z, return_dict=True):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z, return_dict=True):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x, return_dict=True):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z, return_dict=True):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample, sample_posterior=False, return_dict=True, generator=None):
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
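# Added usage sketch (assumes the published `diffusers` package, where this class
# ships as `diffusers.AutoencoderKL`; tensor sizes are illustrative). Tiling trades
# a little seam blending for the ability to process inputs larger than
# `tile_sample_min_size` without exhausting memory.
if __name__ == "__main__":
    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL()  # defaults from the @register_to_config signature above
    vae.enable_tiling()    # encode()/decode() now route through the tiled paths
    with torch.no_grad():
        sample = torch.randn(1, 3, 256, 256)
        posterior = vae.encode(sample).latent_dist
        reconstruction = vae.decode(posterior.mode()).sample
    print(reconstruction.shape)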
| 110
|
"""simple docstring"""
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
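    # Added check: 1406357289 is the worked example from the Project Euler 43
    # problem statement, so it should pass the divisibility test above.
    assert is_substring_divisible(tuple(int(c) for c in "1406357289"))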
| 110
| 1
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_UpperCamelCase: Optional[Any] =logging.get_logger(__name__)
class __lowercase( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : Optional[int] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[int, float] = 1 / 255 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_lowerCAmelCase : List[Any] , ) -> None:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = size if size is not None else {'shortest_edge': 224}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name='crop_size' )
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = resample
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : int , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Optional[Any] , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowerCAmelCase = int((256 / 224) * size['shortest_edge'] )
_lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
_lowerCAmelCase , size=(size_dict['height'], size_dict['width']) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : str , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_lowerCAmelCase , size=(size['height'], size['width']) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[int, float] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Union[str, Any] , ) -> np.ndarray:
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : str , ) -> np.ndarray:
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict , _lowerCAmelCase : ImageInput , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Dict[str, int]] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Dict[str, int]] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[float] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Union[float, Iterable[float]]] = None , _lowerCAmelCase : Optional[Union[float, Iterable[float]]] = None , _lowerCAmelCase : Optional[TensorType] = None , _lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase : str , ) -> BatchFeature:
_lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase = resample if resample is not None else self.resample
_lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase = image_std if image_std is not None else self.image_std
_lowerCAmelCase = size if size is not None else self.size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name='crop_size' )
_lowerCAmelCase = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_center_crop:
_lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_normalize:
_lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
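# Added usage sketch (the class name above is obfuscated in this dump; the
# checkpoint-free constructor call and file name below are hypothetical). The
# preprocess pipeline resizes to (256/224) * shortest_edge, centre-crops to
# 224x224, rescales by 1/255 and normalizes:
#
#   from PIL import Image
#   processor = <ImageProcessor class above>(size={"shortest_edge": 224})
#   batch = processor(images=[Image.open("cat.png")], return_tensors="pt")
#   print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])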
| 701
|
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
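    # Added usage example (class name as restored above):
    stack = LinkedStack()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)         # 3->2->1
    print(stack.peek())  # 3
    print(stack.pop())   # 3
    print(len(stack))    # 2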
| 585
| 0
|
import warnings

from ...utils import is_sklearn_available, requires_backends

if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef

DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
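# Added usage sketch (assumes scikit-learn is installed; arrays are hypothetical):
if __name__ == "__main__":
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(glue_compute_metrics("mrpc", preds, labels))  # {'acc': 0.75, 'f1': 0.8, ...}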
| 51
|
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
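    # Added check: duplicates and partially sorted runs are handled the same way.
    print(odd_even_transposition([3, 1, 2, 1]))  # [1, 1, 2, 3]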
| 318
| 0
|
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
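    # Added examples: the binary digits are returned as an int (no 0b prefix).
    print(hex_to_bin("AC"))     # 10101100
    print(hex_to_bin("-fFfF"))  # -1111111111111111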
| 720
|
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669
| 0
|
"""simple docstring"""
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
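    # Added quick checks: at most one character may occur an odd number of times.
    print(can_string_be_rearranged_as_palindrome_counter("Momo"))  # True
    print(can_string_be_rearranged_as_palindrome("a man a plan"))  # False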
| 545
|
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 137
| 0
|
import re
def dna(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
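    # Added example: complement pairs are A<->T and C<->G.
    print(dna("GCTA"))  # CGAT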
| 587
|
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
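    # Added example (illustrative): four leaves, maximiser moves first, so the
    # root takes the larger of the two min-node values: max(min(3, 5), min(2, 9)).
    print(minimax(0, 0, True, [3, 5, 2, 9], 2))  # 3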
| 587
| 1
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = VideoMAEConfig()
set_architecture_configs(lowercase , lowercase )
if "finetuned" not in model_name:
lowerCamelCase_ = False
if "finetuned" in model_name:
lowerCamelCase_ = 'huggingface/label-files'
if "kinetics" in model_name:
lowerCamelCase_ = 4_00
lowerCamelCase_ = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
lowerCamelCase_ = 1_74
lowerCamelCase_ = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
lowerCamelCase_ = json.load(open(hf_hub_download(lowercase , lowercase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase_ = {int(lowercase ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : Optional[Any] ):
'''simple docstring'''
if "small" in model_name:
lowerCamelCase_ = 3_84
lowerCamelCase_ = 15_36
lowerCamelCase_ = 12
lowerCamelCase_ = 16
lowerCamelCase_ = 12
lowerCamelCase_ = 3
lowerCamelCase_ = 1_92
lowerCamelCase_ = 7_68
elif "large" in model_name:
lowerCamelCase_ = 10_24
lowerCamelCase_ = 40_96
lowerCamelCase_ = 24
lowerCamelCase_ = 16
lowerCamelCase_ = 12
lowerCamelCase_ = 8
lowerCamelCase_ = 5_12
lowerCamelCase_ = 20_48
elif "huge" in model_name:
lowerCamelCase_ = 12_80
lowerCamelCase_ = 51_20
lowerCamelCase_ = 32
lowerCamelCase_ = 16
lowerCamelCase_ = 12
lowerCamelCase_ = 8
lowerCamelCase_ = 6_40
lowerCamelCase_ = 25_60
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
if "encoder." in name:
lowerCamelCase_ = name.replace('encoder.' , '' )
if "cls_token" in name:
lowerCamelCase_ = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
lowerCamelCase_ = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase_ = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
lowerCamelCase_ = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
lowerCamelCase_ = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
lowerCamelCase_ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
lowerCamelCase_ = name.replace('attn' , 'attention.self' )
if "attn" in name:
lowerCamelCase_ = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
lowerCamelCase_ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCamelCase_ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
lowerCamelCase_ = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
lowerCamelCase_ = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
lowerCamelCase_ = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase_ = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase_ = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
lowerCamelCase_ = name.replace('head' , 'classifier' )
return name
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : str ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowercase )
if key.startswith('encoder.' ):
lowerCamelCase_ = key.replace('encoder.' , '' )
if "qkv" in key:
lowerCamelCase_ = key.split('.' )
if key.startswith('decoder.blocks' ):
lowerCamelCase_ = config.decoder_hidden_size
lowerCamelCase_ = int(key_split[2] )
lowerCamelCase_ = 'decoder.decoder_layers.'
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[dim : dim * 2, :]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = config.hidden_size
lowerCamelCase_ = int(key_split[1] )
lowerCamelCase_ = 'videomae.encoder.layer.'
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[dim : dim * 2, :]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowerCamelCase_ = np.load(lowercase )
return list(lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = get_videomae_config(lowercase )
if "finetuned" in model_name:
lowerCamelCase_ = VideoMAEForVideoClassification(lowercase )
else:
lowerCamelCase_ = VideoMAEForPreTraining(lowercase )
# download original checkpoint, hosted on Google Drive
lowerCamelCase_ = 'pytorch_model.bin'
gdown.cached_download(lowercase , lowercase , quiet=lowercase )
lowerCamelCase_ = torch.load(lowercase , map_location='cpu' )
if "model" in files:
lowerCamelCase_ = files['model']
else:
lowerCamelCase_ = files['module']
lowerCamelCase_ = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
# verify model on basic input
lowerCamelCase_ = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
lowerCamelCase_ = prepare_video()
lowerCamelCase_ = image_processor(lowercase , return_tensors='pt' )
if "finetuned" not in model_name:
lowerCamelCase_ = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowerCamelCase_ = torch.load(lowercase )
lowerCamelCase_ = model(**lowercase )
lowerCamelCase_ = outputs.logits
lowerCamelCase_ = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCamelCase_ = torch.Size([1, 4_00] )
lowerCamelCase_ = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCamelCase_ = torch.Size([1, 1_74] )
lowerCamelCase_ = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
lowerCamelCase_ = torch.Size([1, 14_08, 15_36] )
lowerCamelCase_ = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
lowerCamelCase_ = torch.Size([1, 14_08, 15_36] )
lowerCamelCase_ = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCamelCase_ = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
lowerCamelCase_ = torch.Size([1, 14_08, 15_36] )
lowerCamelCase_ = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCamelCase_ = torch.Size([1, 4_00] )
lowerCamelCase_ = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCamelCase_ = torch.Size([1, 4_00] )
lowerCamelCase_ = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCamelCase_ = torch.Size([1, 4_00] )
lowerCamelCase_ = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCamelCase_ = torch.Size([1, 4_00] )
lowerCamelCase_ = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
lowerCamelCase_ = torch.Size([1, 14_08, 15_36] )
lowerCamelCase_ = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCamelCase_ = torch.Size([1, 1_74] )
lowerCamelCase_ = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
lowerCamelCase_ = torch.Size([1, 14_08, 15_36] )
lowerCamelCase_ = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCamelCase_ = torch.Size([1, 1_74] )
lowerCamelCase_ = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowerCamelCase_ = outputs.loss
assert torch.allclose(lowercase , lowercase , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(lowercase , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCamelCase : List[str] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 70
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
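

# A minimal single-process reference for the same algorithm (an addition, not part of
# the original module), useful for sanity-checking the parallel version above.
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    n = len(arr)
    for phase in range(n):
        # even phases compare (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for j in range(start, n - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr


# e.g. odd_even_transposition_sequential([3, 1, 2]) == [1, 2, 3]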
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_head_masking = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
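

# Quick usage sketch (an addition, not part of the original tests): extracting
# features from the same pretrained checkpoint the integration test above exercises.
def _mpnet_feature_extraction_example():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
    model = MPNetModel.from_pretrained("microsoft/mpnet-base")
    inputs = tokenizer("MPNet combines masked and permuted pre-training.", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state.shape  # (1, sequence_length, 768)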
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
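

# Toy demo (an addition, not part of the original script) of rename_key:
# the value moves from the old key to the new one, in place.
def _demo_rename_key():
    toy = {"query_embed.weight": 1}
    rename_key(toy, "query_embed.weight", "query_position_embeddings.weight")
    assert toy == {"query_position_embeddings.weight": 1}
    return toy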
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load default config
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
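
# Example invocation (an addition; the script filename and output directory are
# placeholders, not confirmed by the original file):
#
#   python convert_detr_to_pytorch.py \
#       --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50 \
#       --push_to_hub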
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
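

# Minimal sketch (an addition, not part of the original test) of the size bookkeeping
# above: when full_size does not divide evenly by world_size, the remainder examples
# go to the lowest ranks, which is exactly what expected_local_size computes.
def _split_sizes(full_size, world_size):
    return [full_size // world_size + int(rank < full_size % world_size) for rank in range(world_size)]


# e.g. _split_sizes(12, 8) == [2, 2, 2, 2, 1, 1, 1, 1]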
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
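

# Usage sketch (an addition, not part of the original module); the input image size
# is arbitrary and just for illustration. With shortest_edge=224 and crop_pct=224/256,
# the shortest edge is first resized to 256, then center-cropped to 224.
def _convnext_processor_example():
    processor = ConvNextImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    batch = processor.preprocess(image, return_tensors="np")
    return batch["pixel_values"].shape  # (1, 3, 224, 224)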
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
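
# Example invocation (an addition; all paths and the script filename are placeholders):
#
#   python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-converted \
#       --dict_path /path/to/dict \
#       --is_finetuned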
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first n natural
    numbers and the sum of their squares.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
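

# Usage sketch (an addition; the model id, image path, and from_pretrained behavior
# on this custom class are assumptions, not confirmed by the original file): the
# pipeline noises an input image to an intermediate timestep chosen by `strength`,
# then denoises it back with DDIM.
def _noise_comparative_analysis_example():
    pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-celebahq-256")
    image = PIL.Image.open("input.png").resize((256, 256))
    out, timestep = pipe(image=image, strength=0.5, return_dict=False)
    return out[0], timestep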
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}')
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}')
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
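# Usage sketch (the paths below are placeholders, not files shipped with the
# repo, and the script filename is assumed from the conversion target):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted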
| 686
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
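# Usage sketch: with the lazy module in place, consumers import the public names
# directly and the heavy submodules only load on first attribute access. The
# checkpoint name below is an assumption, not something this file pins down:
#
#   from transformers import DPTImageProcessor, DPTForDepthEstimation
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#   model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")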
| 435
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
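# Minimal usage sketch (the checkpoint name "facebook/flava-full" is an
# assumption; any FLAVA checkpoint with both components would work):
#
#   from transformers import FlavaProcessor
#   from PIL import Image
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=Image.open("cat.png"), text=["a photo of a cat"], return_tensors="pt")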
| 486
| 0
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ", )
    return int(stack[0])

if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
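# Worked example: for "5 6 9 * +" the trace pushes 5 and 6, evaluates 6 * 9 = 54,
# then 5 + 54 = 59, so solve(["5", "6", "9", "*", "+"]) returns 59.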
| 704
|
import random
def random_graph(vertices_number, probability, directed=False) -> dict:
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph

def complete_graph(vertices_number) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
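# Example (edges are sampled, so output varies): random_graph(4, 0.5) might return
# {0: [1, 3], 1: [0], 2: [3], 3: [0, 2]}, while random_graph(4, 1) always returns
# the complete graph {0: [1, 2, 3], 1: [0, 2, 3], 2: [0, 1, 3], 3: [0, 1, 2]}.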
| 670
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
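# Usage sketch: importing the public name triggers the real import lazily, e.g.
#   from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM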
| 281
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'

@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_04)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_05)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, )
| 281
| 1
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be an integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')
    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
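# Sanity check: the circuit applies the QFT to the all-zero state, which maps to a
# uniform superposition, so the 10000 shots should split roughly evenly (~1250
# counts) across the eight 3-qubit basis states.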
| 557
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs, ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')
        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('llm_int8_threshold must be a float')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('llm_int8_skip_modules must be a list of strings')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('bnb_4bit_quant_type must be a string')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0'):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype']).split('.')[1]
        return output

    def __repr__(self):
        return f'{self.__class__.__name__} {self.to_json_string()}'

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
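# Minimal usage sketch (the model name is an assumption; any Hub checkpoint with
# bitsandbytes support would do):
#
#   from transformers import AutoModelForCausalLM
#   quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True)
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quant_config)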
| 557
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_check_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
        self.parent.assertTrue(hasattr(config, 'num_encoder_blocks'))
class SegformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip('SegFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
                [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
                [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
                [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
                [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 691
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False):
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
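# Effect of drop_path in a sketch: with drop_prob=0.5 and training=True, roughly
# half the samples in the batch get their residual branch zeroed, while survivors
# are scaled by 1 / keep_prob = 2, so the expected value stays unchanged.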
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct Patch Embeddings."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True)
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True)

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i], stride=config.strides[i], padding=config.padding[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], ))
        self.patch_embeddings = nn.ModuleList(embeddings)
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config, num_channels=config.hidden_sizes[i], pool_size=config.pool_size, hidden_size=config.hidden_sizes[i], intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio), drop_path=dpr[cur + j], ))
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
__A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.", POOLFORMER_START_DOCSTRING, )
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        encoder_outputs = self.encoder(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, )
class __lowerCAmelCase ( nn.Module):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A__ : List[str] =nn.Linear(config.hidden_size , config.hidden_size )
def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
A__ : int =self.dense(UpperCamelCase__ )
return output
@add_start_docstrings(
"""
PoolFormer Model transformer with an image classification head on top
""" , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : str ):
super().__init__(UpperCamelCase__ )
A__ : List[str] =config.num_labels
A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ )
# Final norm
A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
A__ : Dict =(
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict
A__ : List[str] =self.poolformer(
UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
A__ : str =outputs[0]
A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) )
A__ : Optional[Any] =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ : int ="regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ : Tuple ="single_label_classification"
else:
A__ : Optional[int] ="multi_label_classification"
if self.config.problem_type == "regression":
A__ : Dict =MSELoss()
if self.num_labels == 1:
A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
elif self.config.problem_type == "single_label_classification":
A__ : Tuple =CrossEntropyLoss()
A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ : List[Any] =BCEWithLogitsLoss()
A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
A__ : Optional[int] =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
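# --- Added usage sketch (not part of the original file). A minimal, hedged example of
# --- running the classification head through the public transformers API; the
# --- "sail/poolformer_s12" checkpoint name is an assumption about what is on the Hub.
#
# import torch
# from transformers import PoolFormerForImageClassification
#
# model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
# pixel_values = torch.randn(1, 3, 224, 224)  # dummy batch standing in for a real image
# with torch.no_grad():
#     logits = model(pixel_values).logits
# print(model.config.id2label[logits.argmax(-1).item()])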
| 656
| 0
|
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
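# --- Added usage sketch (not part of the original file). The first call JIT-compiles the
# --- extension, which is slow and requires a CUDA toolchain; a hedged, guarded call:
#
# import torch
#
# if torch.cuda.is_available():
#     MSDA = load_cuda_kernels()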
| 107
|
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
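# --- Added usage sketch (not part of the original file). A minimal, hedged example of the
# --- API re-exported above; "runwayml/stable-diffusion-v1-5" is a well-known checkpoint
# --- but still an assumption about what is available on the Hub.
#
# import torch
# from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#
# pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
# pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# image = pipe("a photo of an astronaut riding a horse").images[0]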
| 107
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
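# --- Added note (not part of the original file): with the _LazyModule indirection above,
# --- heavy submodules are only imported on first attribute access, e.g.:
#
# from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig  # triggers the lazy import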
| 69
|
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"4 for group projects, there are {combinations(40, 4)} ways",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"are {combinations(10, 3)} ways that first, second and",
'''third place can be awarded.''',
)
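# --- Added note (not part of the original file): on Python 3.8+, math.comb computes the
# --- same value; a small, hedged sanity check against the implementation above.
if __name__ == "__main__":
    from math import comb

    assert combinations(52, 5) == comb(52, 5)
    assert combinations(40, 4) == comb(40, 4)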
| 659
| 0
|
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]
    # reading the elements of the list from one line
    sorted_list = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
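# --- Added sanity check (not part of the original file): odd-even transposition sort
# --- should agree with Python's built-in sorted() on a small random sample.
if __name__ == "__main__":
    import random

    sample = random.sample(range(100), 10)
    assert odd_even_sort(list(sample)) == sorted(sample)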
| 438
|
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
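# --- Added illustration (not part of the original file): TransfoXL's Moses-based
# --- tokenizer protects numbers with "@,@" / "@.@" markers so detokenization is
# --- lossless, as the moses-numbers test above asserts. A hedged mini-example:
#
# tokenizer = TransfoXLTokenizer(lower_case=False)
# tokenizer.tokenize("$5,000")  # expected: ['$', '5', '@,@', '000']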
| 438
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 65
|
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle using the symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)

    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
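# --- Added sanity check (not part of the original file): both generators should
# --- produce identical triangles for small inputs.
if __name__ == "__main__":
    for n in range(8):
        assert generate_pascal_triangle(n) == generate_pascal_triangle_optimized(n)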
| 236
| 0
|
'''simple docstring'''
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
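# --- Added sanity check (not part of the original file): known small prime lists.
if __name__ == "__main__":
    assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
    assert prime_sieve_eratosthenes(20) == [2, 3, 5, 7, 11, 13, 17, 19]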
| 267
|
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
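# --- Added usage sketch (not part of the original file): a typical invocation, with the
# --- checkpoint path as a placeholder:
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron_gpt2_checkpoint.zip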
| 267
| 1
|