"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def lowerCamelCase_( ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=_lowerCamelCase , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=_lowerCamelCase , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=_lowerCamelCase , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=_lowerCamelCase , default=0 , help="cuda_id." , )
_lowerCamelCase : Optional[int] = parser.parse_args()
return args
def image_grid(imgs, rows, cols):
    """Paste ``rows * cols`` PIL images into a single grid image."""
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
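
# A minimal usage sketch for `image_grid` (assumes Pillow is installed; the
# solid-color squares are stand-ins for generated samples):
#
#     from PIL import Image
#     thumbs = [Image.new("RGB", (64, 64), color=(i * 60, 0, 0)) for i in range(4)]
#     image_grid(thumbs, rows=2, cols=2).save("grid_preview.png")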
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase="robotic cat with wings" , _lowerCamelCase=7.5 , _lowerCamelCase=50 , _lowerCamelCase=1 , _lowerCamelCase=42 , ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Any = torch.Generator(pipeline.device ).manual_seed(_lowerCamelCase )
_lowerCamelCase : str = pipeline(
_lowerCamelCase , guidance_scale=_lowerCamelCase , num_inference_steps=_lowerCamelCase , generator=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , ).images
_lowerCamelCase : int = int(math.sqrt(_lowerCamelCase ) )
_lowerCamelCase : str = image_grid(_lowerCamelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker so the generated images are never filtered
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load the INT8 UNet exported by Intel Neural Compressor
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
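
# Example invocation (the script filename and model directory are placeholders;
# any Stable Diffusion checkpoint directory works, optionally containing a
# quantized `best_model.pt`):
#
#     python text2images.py -m ./sd-model-dir -c "robotic cat with wings" -n 4 -s 42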
"""Tests for the M2M100 tokenizer."""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__magic_name__ : int = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__magic_name__, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e"
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
"""simple docstring"""
from math import factorial
def _SCREAMING_SNAKE_CASE ( __snake_case : int = 1_00 ):
'''simple docstring'''
return sum(int(__snake_case ) for x in str(factorial(__snake_case ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
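
# Quick sanity checks for the function above:
#     solution(10) == 27    # 10! = 3628800 -> 3+6+2+8+8+0+0
#     solution(100) == 648  # the Project Euler #20 answer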
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a :
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn='gelu' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase_ ( self ):
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = inputs['prompt']
lowercase = inputs['generator']
lowercase = inputs['num_inference_steps']
lowercase = inputs['output_type']
if "image" in inputs:
lowercase = inputs['image']
else:
lowercase = None
if "mask_image" in inputs:
lowercase = inputs['mask_image']
else:
lowercase = None
if "original_image" in inputs:
lowercase = inputs['original_image']
else:
lowercase = None
lowercase , lowercase = pipe.encode_prompt(_lowerCamelCase )
# inputs with prompt converted to embeddings
lowercase = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
lowercase = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowerCamelCase , _lowerCamelCase ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = inputs['generator']
lowercase = inputs['num_inference_steps']
lowercase = inputs['output_type']
# inputs with prompt converted to embeddings
lowercase = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
lowercase = pipe_loaded(**_lowerCamelCase )[0]
lowercase = np.abs(to_np(_lowerCamelCase ) - to_np(_lowerCamelCase ) ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
def UpperCamelCase_ ( self ):
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
lowercase = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = pipe_loaded(**_lowerCamelCase )[0]
lowercase = np.abs(to_np(_lowerCamelCase ) - to_np(_lowerCamelCase ) ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
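
# A minimal sketch of how a concrete pipeline test would consume this mixin
# (the class, pipeline, and base-class names follow the diffusers test-suite
# pattern and should be treated as assumptions):
#
#     class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
#         pipeline_class = IFPipeline
#
#         def get_dummy_components(self):
#             return self._get_dummy_components()
#
#         def test_save_load_optional_components(self):
#             self._test_save_load_optional_components()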
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
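
# Example: with `RUN_SLOW=yes` exported in the environment,
# `parse_flag_from_env("RUN_SLOW", default=False)` returns 1 (truthy), while an
# unset variable falls back to `default`. Note that `strtobool` returns 0/1,
# not a bool.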
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
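
# Usage sketch for `for_all_test_methods` (the decorators are the ones defined
# above; the test class itself is illustrative):
#
#     @for_all_test_methods(slow, require_faiss)
#     class IndexBenchmarks(unittest.TestCase):
#         def test_build_index(self):
#             ...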
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
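
# Usage sketch for the `offline` context manager: any `requests` call made in
# the block fails fast instead of touching the network.
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         ...  # code under test; requests.Session.send now raises ConnectionError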
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
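
# Usage sketch (runs a trivial Python one-liner and captures its output as a
# list of decoded lines):
#
#     result = execute_subprocess_async([sys.executable, "-c", "print('ok')"])
#     assert result.stdout[0] == "ok"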
def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker (0 if `pytest-xdist` isn't used)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a `--master_port` value offset per `pytest-xdist` worker so concurrent tests don't clash."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_A: Union[str, Any] = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def _lowerCAmelCase ( _lowerCAmelCase=None )-> Optional[Any]:
if subparsers is not None:
__UpperCAmelCase = subparsers.add_parser('tpu-config' , description=_description )
else:
__UpperCAmelCase = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
__UpperCAmelCase = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=_lowerCAmelCase , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=_lowerCAmelCase , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
__UpperCAmelCase = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=_lowerCAmelCase , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=_lowerCAmelCase )
return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
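
# Example invocation (assuming this parser is registered under the `accelerate`
# CLI; the TPU name and zone are placeholders), mirroring the flags defined in
# `tpu_command_parser` above:
#
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "pip install -U accelerate" --debug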
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" FNet tokenizer backed by HuggingFace's tokenizers library, based on SentencePiece/Unigram."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs by adding special tokens: ``[CLS] X [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Create token type IDs: 0 for the first sequence (and [CLS]/[SEP]), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
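
# Usage sketch (downloads the checkpoint referenced in the maps above):
#
#     tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     enc = tok("first segment", "second segment")
#     # enc["token_type_ids"] marks the first segment (plus [CLS]/[SEP]) with 0s
#     # and the second segment (plus its trailing [SEP]) with 1s.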
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two points, using reduced (parametric) latitudes."""
    # Equation parameters (WGS84 flattening)
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
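
# Example (coordinates are illustrative): the distance from San Francisco
# (37.774, -122.419) to New York (40.713, -74.006) comes out on the order of
# 4.1e6 metres (~4,100 km):
#
#     print(haversine_distance(37.774, -122.419, 40.713, -74.006))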
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
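
# With the lazy module in place, a top-level import such as
# `from transformers.models.clip import CLIPModel` only loads the heavy
# `modeling_clip` submodule at attribute-access time, keeping package import cheap.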
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : Tuple =(
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase )[0]
lowerCamelCase_ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
| 676
| 0
|
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    Merge two equal-length binary strings that differ in at most one
    position, replacing the differing bit with '_'. Return False if they
    differ in more than one position.

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """
    Repeatedly combine terms that differ in exactly one bit; terms that
    never combine in a round are the prime implicants.

    >>> check(['0000', '0001'])
    ['000_']
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # a successful merge marks both terms as combined and
                # carries the merged term into the next round
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """
    Convert each minterm to a fixed-width binary string.

    >>> decimal_to_binary(3, [1, 5])
    ['001', '101']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """
    Check whether a prime implicant with `count` '_' wildcards covers the
    given minterm: it does exactly when the two strings differ only in the
    wildcard positions.

    >>> is_for_table('__1', '011', 2)
    True
    >>> is_for_table('01_', '001', 1)
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # a column (minterm) covered by exactly one implicant makes that
    # implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
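
# A minimal usage sketch appended for illustration (not part of the original
# module), assuming the helpers above: minimize f(a, b, c) = sum of minterms
# (0, 1, 2, 5). `check` deduplicates with a set, so implicant order may vary.
def _example_quine_mccluskey() -> None:
    binary = decimal_to_binary(3, [0, 1, 2, 5])  # ['000', '001', '010', '101']
    prime_implicants = check(binary)  # ['00_', '0_0', '_01'] in some order
    chart = prime_implicant_chart(prime_implicants, binary)
    print(selection(chart, prime_implicants))  # e.g. ['0_0', '_01']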
| 710
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
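
# A hedged usage sketch appended for illustration (not part of the original
# tests). `LayoutLMvaImageProcessor` in this file corresponds to transformers'
# LayoutLMv3ImageProcessor; with the default apply_ocr=True it also requires
# pytesseract and a local Tesseract install.
def _example_layoutlm_ocr() -> None:
    from datasets import load_dataset

    image_processing = LayoutLMvaImageProcessor()
    ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
    image = Image.open(ds[0]["file"]).convert("RGB")
    encoding = image_processing(image, return_tensors="pt")
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
    # one list of OCR'd words and one list of normalized boxes per image
    print(len(encoding.words[0]), len(encoding.boxes[0]))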
| 682
| 0
|
"""simple docstring"""
import requests
__lowerCAmelCase : int = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Dict = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 58
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
snake_case_ : Optional[int] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case_ : Optional[Any] = F'{src_lang}-{tgt_lang}'
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : List[str] = os.path.join(__UpperCamelCase , """README.md""" )
print(F'Generating {path}' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__lowerCAmelCase : str = Path(__file__).resolve().parent.parent.parent
__lowerCAmelCase : Optional[int] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = model_name.split('''-''')
__lowerCAmelCase : Optional[int] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=128 , lowercase__=32 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=4 , lowercase__=None , ) -> str:
"""simple docstring"""
_snake_case : Tuple = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : List[Any] = seq_length
_snake_case : str = is_training
_snake_case : Union[str, Any] = use_input_mask
_snake_case : Optional[int] = use_token_type_ids
_snake_case : List[Any] = use_labels
_snake_case : str = vocab_size
_snake_case : Union[str, Any] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Dict = intermediate_size
_snake_case : Union[str, Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : str = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : List[str] = type_vocab_size
_snake_case : Dict = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : int = num_labels
_snake_case : Optional[int] = num_choices
_snake_case : Dict = scope
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Optional[int] = None
if self.use_input_mask:
_snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : int = None
if self.use_token_type_ids:
_snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case : Optional[Any] = None
_snake_case : Any = None
_snake_case : List[str] = None
if self.use_labels:
_snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : str = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs, plus encoder states for cross-attention."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
"""simple docstring"""
_snake_case : List[str] = NezhaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : Dict = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
_snake_case : str = model(lowercase__ , token_type_ids=lowercase__ )
_snake_case : Union[str, Any] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Optional[Any] = True
_snake_case : Optional[Any] = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : Union[str, Any] = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )
_snake_case : Any = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , encoder_hidden_states=lowercase__ , )
_snake_case : int = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : Dict = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : int = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : Optional[Any] = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : Union[str, Any] = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : str = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : Dict = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , next_sentence_label=lowercase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : str = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : Optional[int] = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
"""simple docstring"""
_snake_case : List[Any] = self.num_labels
_snake_case : str = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : int = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Any:
"""simple docstring"""
_snake_case : Union[str, Any] = self.num_labels
_snake_case : Any = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
"""simple docstring"""
_snake_case : Dict = self.num_choices
_snake_case : Tuple = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : Dict = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        """Unpack the prepared inputs into the common (config, inputs_dict) form."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase (a__ , a__ , a__ , unittest.TestCase ):
_lowercase : Optional[int] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_lowercase : Dict = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Any = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : str = NezhaModelTester(self )
_snake_case : List[str] = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Any = NezhaModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Optional[int] = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
_snake_case : str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_snake_case : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case : Any = model(lowercase__ , attention_mask=lowercase__ )[0]
_snake_case : Optional[int] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , lowercase__ )
_snake_case : Optional[Any] = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase__ , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
_snake_case : str = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
_snake_case : str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_snake_case : List[Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case : Any = model(lowercase__ , attention_mask=lowercase__ )[0]
_snake_case : int = torch.Size((1, 6, 21_128) )
self.assertEqual(output.shape , lowercase__ )
_snake_case : Tuple = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase__ , atol=1E-4 ) )
| 47
|
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # compare two TensorProtos while ignoring their names
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    # recurse into subgraphs carried by control-flow nodes
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicate initializers from an ONNX model to reduce its size."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / double
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
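
# A minimal usage sketch appended for illustration (not part of the original
# script); the file path below is a placeholder.
def _example_remove_dups() -> None:
    optimized_path = remove_dup_initializers("model.onnx")  # hypothetical file
    print("optimized model written to:", optimized_path)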
| 47
| 1
|
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube, using a float cube root."""
    val = n ** (1 / 3)
    # note: the float cube root is inexact, so this equality test can
    # misclassify large inputs
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
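
# A hedged alternative sketch (added here, not in the original file): rounding
# the candidate root and checking its neighbours keeps the test exact in
# integer arithmetic, even when the float root is slightly off.
def perfect_cube_exact(n: int) -> bool:
    if n < 0:
        return perfect_cube_exact(-n)  # (-k) ** 3 == -(k ** 3)
    root = round(n ** (1 / 3))
    return any((root + d) ** 3 == n for d in (-1, 0, 1) if root + d >= 0)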
| 122
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 122
| 1
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowercase_ = {
'''allenai/led-base-16384''': 16384,
}
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : List[Any] =VOCAB_FILES_NAMES
lowerCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : str =LEDTokenizer
lowerCamelCase__ : int =["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="replace" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=False , lowerCamelCase=True , **lowerCamelCase , ) -> List[Any]:
"""simple docstring"""
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase , **lowerCamelCase , )
__magic_name__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowerCamelCase ) != add_prefix_space:
__magic_name__ : Dict = getattr(lowerCamelCase , pre_tok_state.pop('''type''' ) )
__magic_name__ : str = add_prefix_space
__magic_name__ : Optional[Any] = pre_tok_class(**lowerCamelCase )
__magic_name__ : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__magic_name__ : int = '''post_processor'''
__magic_name__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase , lowerCamelCase )
if tokenizer_component_instance:
__magic_name__ : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__magic_name__ : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
__magic_name__ : List[Any] = tuple(state['''cls'''] )
__magic_name__ : Tuple = False
if state.get('''add_prefix_space''' , lowerCamelCase ) != add_prefix_space:
__magic_name__ : Optional[int] = add_prefix_space
__magic_name__ : Union[str, Any] = True
if state.get('''trim_offsets''' , lowerCamelCase ) != trim_offsets:
__magic_name__ : Tuple = trim_offsets
__magic_name__ : Optional[int] = True
if changes_to_apply:
__magic_name__ : Union[str, Any] = getattr(lowerCamelCase , state.pop('''type''' ) )
__magic_name__ : Union[str, Any] = component_class(**lowerCamelCase )
setattr(self.backend_tokenizer , lowerCamelCase , lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowercase ( self ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase ( self , lowerCamelCase ) -> str:
"""simple docstring"""
__magic_name__ : Any = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else value
__magic_name__ : Union[str, Any] = value
def lowercase ( self , *lowerCamelCase , **lowerCamelCase ) -> BatchEncoding:
"""simple docstring"""
__magic_name__ : Optional[Any] = kwargs.get('''is_split_into_words''' , lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def lowercase ( self , *lowerCamelCase , **lowerCamelCase ) -> BatchEncoding:
"""simple docstring"""
__magic_name__ : List[str] = kwargs.get('''is_split_into_words''' , lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def lowercase ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
__magic_name__ : Optional[Any] = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Wrap the token ids in <s> ... </s>, adding a second segment if given."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED, like BART, does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def lowercase ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ) -> dict:
"""simple docstring"""
__magic_name__ : str = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__magic_name__ : str = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__magic_name__ : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__magic_name__ : str = len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCamelCase )
if needs_to_be_padded:
__magic_name__ : int = len(lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__magic_name__ : int = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__magic_name__ : Any = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
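
# A hedged usage sketch appended for illustration (not part of the original
# file). The class above corresponds to transformers' LEDTokenizerFast; this
# shows how the custom _pad extends `global_attention_mask` alongside
# `input_ids`, with -1 marking padded positions as plain local attention.
def _example_global_attention_padding() -> None:
    from transformers import LEDTokenizerFast

    tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    enc = tokenizer("a long document ...")
    enc["global_attention_mask"] = [0] * len(enc["input_ids"])
    enc["global_attention_mask"][0] = 1  # global attention on the <s> token
    padded = tokenizer.pad(enc, padding="max_length", max_length=32)
    assert len(padded["global_attention_mask"]) == len(padded["input_ids"]) == 32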
| 717
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowercase_ = TypeVar('''T''')
class GraphAdjacencyList(Generic[T]):
    """Adjacency-list representation of a graph, directed or undirected."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect source_vertex and destination_vertex with an edge."""
        if not self.directed:  # For undirected graphs
            # if both the source and the destination vertex are already present in
            # the adjacency list, add the destination vertex to the source vertex's
            # list of adjacent vertices and vice versa.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only the source vertex is present, add the destination vertex to
            # its list of adjacent vertices, then create a new entry keyed by the
            # destination vertex with the source vertex as its first neighbour.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only the destination vertex is present, mirror the case above.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create both entries, each listing the
            # other vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both vertices are present, append the destination vertex to the
            # source vertex's list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only the source vertex is present, append the destination vertex
            # to its list and create an empty entry for the destination vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only the destination vertex is present, create an entry keyed by
            # the source vertex with the destination vertex as its neighbour.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create both entries; the destination
            # vertex starts with no adjacent vertices.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
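if __name__ == "__main__":
    # usage sketch: build an undirected triangle and print its adjacency list
    graph = GraphAdjacencyList[int](directed=False)
    graph.add_edge(0, 1)
    graph.add_edge(1, 2)
    graph.add_edge(2, 0)
    print(graph)  # {0: [1, 2], 1: [0, 2], 2: [1, 0]}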
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
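# Rough sizing note for ConstantLengthDataset (illustrative numbers): with
# seq_length=1024, chars_per_token=3.6 and num_of_sequences=1024, each buffer
# reads about 1024 * 3.6 * 1024 ~= 3.77M characters of raw text, i.e. enough
# for roughly 1024 packed sequences per tokenizer call.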
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
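def _perplexity_demo():
    # Sanity sketch (not called by the script): perplexity is exp(mean
    # cross-entropy), so a mean loss of 1.0 maps to a perplexity of e ~= 2.718.
    return torch.exp(torch.tensor(1.0))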
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer
    category, so that its weights stay loadable by FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        """One-hot cross entropy over the last axis of `logits`."""
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
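def _cross_entropy_demo():
    # Hedged sanity sketch (not used by training): the one-hot cross-entropy
    # above on a single 3-class example. For logits [2.0, 0.5, -1.0] and
    # label 0, the loss is -log softmax(logits)[0] ~= 0.24.
    logits = jnp.array([[2.0, 0.5, -1.0]])
    labels = jnp.array([0])
    one_hot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    return -jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1)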
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    seed: int = 3  # NOTE: this field name is a reconstruction; the original identifier was lost
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
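def _padding_demo():
    # Illustrative run of the collator's padding with hypothetical ids and
    # max_length=6: ids are padded with pad_id, the attention mask with zeros.
    collator = DataCollator(pad_id=0, max_length=6)
    input_ids, attention_mask = collator._fetch_inputs([101, 7592, 102])
    return input_ids, attention_mask  # [101, 7592, 102, 0, 0, 0], [1, 1, 1, 0, 0, 0]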
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
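def _schedule_demo():
    # Hedged sketch of the schedule shape with small illustrative numbers:
    # linear warmup from init_lr to the peak over 10 steps, then linear
    # decay towards ~1e-7 over the remaining 40 steps.
    lr = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=10, num_train_steps=50)
    return lr(0), lr(10), lr(50)  # 0.0 at start, 3e-5 at the peak, ~1e-7 at the end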
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # apply weight decay to everything except biases and LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
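def _weight_decay_mask_demo():
    # Illustrative check of the masking rule above: kernels get weight decay,
    # biases and LayerNorm scales do not.
    params = {"dense": {"kernel": 1.0, "bias": 0.0}, "LayerNorm": {"scale": 1.0}}
    flat = traverse_util.flatten_dict(params)
    return {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in flat}
    # -> {('dense', 'kernel'): True, ('dense', 'bias'): False, ('LayerNorm', 'scale'): False}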
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of the vision encoder's
            # attention layers require special treatment: we need to split them up into
            # separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            # NOTE: the destination key names below are reconstructed from the renamed
            # GroupViT layout; treat them as an informed assumption.
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of the text encoder's
            # attention layers require special treatment: we need to split them up into
            # separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
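def _qkv_split_demo():
    # Hedged sketch of the split above: a fused (3 * dim, dim) qkv projection
    # separates into three (dim, dim) matrices for the q, k and v projections.
    dim = 4  # hypothetical hidden size
    val = torch.randn(3 * dim, dim)
    q, k, v = val[:dim, :], val[dim : dim * 2, :], val[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    return q, k, v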
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
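def _trim_batch_demo():
    # Illustrative run (hypothetical ids, pad id 0): columns that contain
    # only padding are dropped across every row at once.
    input_ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
    return trim_batch(input_ids, pad_token_id=0)  # tensor([[5, 6], [7, 0]])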
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
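def _f1_demo():
    # Worked example (illustrative strings): "a cat sat down" vs "the cat sat".
    # After normalization the articles disappear, the token overlap is
    # {"cat", "sat"}, so precision = 2/3, recall = 2/2, and F1 = 0.8.
    return f1_score("a cat sat down", "the cat sat")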
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
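def odd_even_transposition_sequential(arr):
    # For reference, a single-process odd-even transposition pass: the same
    # idea as above without pipes or locks. A minimal sketch, not used by the
    # parallel implementation.
    arr = list(arr)
    for phase in range(len(arr)):
        for j in range(phase % 2, len(arr) - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr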
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
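    # expected output:
    #   True   (2**7 - 1 = 127 is a Mersenne prime)
    #   False  (2**11 - 1 = 2047 = 23 * 89, i.e. composite)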
def valid_coloring(neighbours, colored_vertices, color):
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
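if __name__ == "__main__":
    # usage sketch: 3-color a triangle (vertices 0-2) plus vertex 3, which
    # only touches vertex 0; adjacency is given as a 0/1 matrix.
    graph = [
        [0, 1, 1, 1],
        [1, 0, 1, 0],
        [1, 1, 0, 0],
        [1, 0, 0, 0],
    ]
    print(color(graph, 3))  # [0, 1, 2, 1]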
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy Image stub used when vision is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
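# Quick checks of the formatting above (the hour field is dropped when zero):
assert format_time(75) == "01:15"
assert format_time(3661) == "1:01:01"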
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
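# A minimal usage sketch: driving a bar by hand in a notebook cell. `time` is imported at the
# top of this module; the 100-step loop below is illustrative.
#
#     bar = NotebookProgressBar(100, prefix="Demo")
#     for step in range(1, 101):
#         time.sleep(0.01)  # stand-in for real work
#         bar.update(step, comment=f"step {step}")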
class NotebookTrainingTracker(NotebookProgressBar):
    """
    A `NotebookProgressBar` that also reports metrics in an HTML table and can host a child bar
    (used for the evaluation loop that runs during training).
    """

    def __init__(self, num_steps: int, column_names=None) -> None:
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self) -> None:
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values: dict) -> None:
        """Append one row of metrics to the inner table, extending the columns if new keys appear."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total: int, prefix=None, width: int = 300) -> NotebookProgressBar:
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self) -> None:
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """
    A `TrainerCallback` that displays the progress of training or evaluation in a notebook,
    updating a `NotebookTrainingTracker` as the `Trainer` fires its events.
    """

    def __init__(self) -> None:
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only used when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
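# Sketch of how the callback is wired up (illustrative; in practice the Trainer installs this
# callback automatically when it detects it is running inside a Jupyter notebook):
#
#     trainer = Trainer(model=model, args=training_args, callbacks=[NotebookProgressCallback()])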
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
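# A minimal usage sketch (the column names below are hypothetical): instantiate the template
# with custom column names and read back the mapping used to rename them.
#
#     template = QuestionAnsweringExtractive(question_column="query", context_column="passage")
#     template.column_mapping  # {"query": "question", "passage": "context", "answers": "answers"}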
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")


@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")


class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer from a plain-text vocabulary file with one token per line.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
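# A minimal usage sketch (the vocab content below is hypothetical): build a tokenizer from a
# plain-text vocab file with one token per line, as `load_vocab_file` expects. Because every
# vocab token is registered as a no-split token, "LAG" is split into single residues.
#
#     with open("vocab.txt", "w") as f:
#         f.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]))
#     tokenizer = EsmTokenizer("vocab.txt")
#     tokenizer("LAG")["input_ids"]  # <cls> L A G <eos> -> [0, 4, 5, 6, 2]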
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

lowerCAmelCase__ = None  # placeholder kept for the sentencepiece-less branch above

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" PEGASUS tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special and 0 otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """
        Build model inputs from a sequence by appending eos to the end; no bos token is added to the front.
        """
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
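# Behavior sketch: Pegasus appends a single EOS and never a BOS, so for token_ids_0 = [5, 6, 7]
# the model input is [5, 6, 7, eos_token_id], and get_special_tokens_mask marks only that
# trailing EOS (plus any special ids already present in the body of the sequence).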
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row, one element at a time."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build each new row from the previous one, exploiting the symmetry of the triangle."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both implementations across a range of inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
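    # Quick illustrative cross-check: both implementations must agree on a small input,
    # e.g. the first five rows of the triangle.
    assert generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5)
    print_pascal_triangle(5)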
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
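# Shape sketch for the reshapes above (the sizes are hypothetical): a key kernel slice of
# shape (d_model, num_heads, head_dim) = (512, 8, 64) is flattened on its last two axes into
# (512, 512), while the out kernel merges its *first* two axes instead.
#
#     k_tmp = np.zeros((512, 8, 64))
#     assert k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2]).shape == (512, 512)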
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from a T5X-Flax checkpoint to a PyTorch-compatible state dict."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = (
                    t5x_relpos_bias_lookup(old, i, "decoder").T
                )

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
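# Example invocation (the script name and paths are hypothetical):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/output \
#         --scalable_attention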
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
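# Worked example of the renaming above: an original Swin key such as
#     "encoder.model.layers.0.blocks.0.attn.proj.weight"
# becomes
#     "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"
# ("encoder.model" -> "encoder", the "encoder." prefix is re-added for layer keys,
# and "attn.proj" -> "attention.output.dense").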
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
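# Layout sketch for the qkv split above: the fused projection stacks query, key and value
# row-wise, so a weight of shape (3 * dim, dim) is sliced as rows [0:dim] for the query,
# [dim:2*dim] for the key, and [2*dim:3*dim] (i.e. [-dim:]) for the value.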
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`.
        nsfw_content_detected (`List[bool]`)
            List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, or `None` if safety checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline

try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version(">=", "0.0.12")
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """
        Output class for Flax-based Stable Diffusion pipelines.

        Args:
            images (`np.ndarray`)
                Array of shape `(batch_size, height, width, num_channels)` with images from the diffusion pipeline.
            nsfw_content_detected (`List[bool]`)
                List of flags denoting whether the corresponding generated image likely represents
                "not-safe-for-work" (nsfw) content.
        """

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
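# Consumer-side sketch (illustrative): thanks to the guards above, importing a pipeline either
# succeeds or resolves to a dummy object that raises a clear error naming the missing optional
# dependency (torch, transformers, onnx, k-diffusion, or flax).
#
#     from diffusers import StableDiffusionPipeline  # requires torch + transformers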
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
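# Worked example: softmax([0.0, ln 3]) == [0.25, 0.75]; subtracting the row-wise max before
# exponentiating only improves numerical stability and leaves the result unchanged.
#
#     softmax(np.log(np.array([[1.0, 3.0]])))  # -> array([[0.25, 0.75]])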
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """
    Text classification pipeline using any `ModelForSequenceClassification`.
    """

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs):
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
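# A minimal usage sketch (the checkpoint name is illustrative):
#
#     from transformers import pipeline
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     classifier("This is great!")              # [{"label": "POSITIVE", "score": ...}]
#     classifier("This is great!", top_k=None)  # scores for every label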
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Returns the maximum saving achievable by replacing the network with its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    file_path: str = os.path.join(script_dir, filename)
    edge_weights: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int
    with open(file_path) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edge_weights[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edge_weights)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
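    # Hedged sanity check (added illustration; the triangle weights are made up):
    # Prim's algorithm keeps the two cheapest edges of this 3-vertex triangle.
    demo = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 10})
    assert sum(demo.prims_algorithm().edges.values()) == 1 + 2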
| 281
| 0
|
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slides a `size` x `size` window over a square matrix and keeps the maximum of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slides a `size` x `size` window over a square matrix and keeps the (truncated) average of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
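# Hedged example (added illustration; the 4x4 input below is made up): a 2x2
# window with stride 2 takes the max (or truncated mean) of each block.
_demo = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
assert maxpooling(_demo, size=2, stride=2).tolist() == [[6, 8], [14, 16]]
assert avgpooling(_demo, size=2, stride=2).tolist() == [[3, 5], [11, 13]]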
# Main Function
if __name__ == "__main__":
    from doctest import testmod
    testmod(name="avgpooling", verbose=True)
    # Loading the image
    image = Image.open("path_to_image")
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 79
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)
    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function output."""
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler in the style of Karras et al. (2022), ported to Flax."""
    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass
    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, )
    def add_noise_to_input(self, state, sample, sigma, key) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict: bool = True
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict: bool = True
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
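# Hedged usage sketch (added for illustration; `denoise`, `sample`, and `key` are
# stand-ins, not part of the original file):
# scheduler = FlaxKarrasVeScheduler()
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# for t in state.timesteps:
#     sigma = state.schedule[t]
#     sigma_prev = state.schedule[t - 1] if t > 0 else 0
#     sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#     out = scheduler.step(state, denoise(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
#     sample = out.prev_sample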
| 590
| 0
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"
    def __init__(
        self,
        vocab_size=49_408,
        hidden_size=512,
        intermediate_size=2_048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49_406,
        eos_token_id=49_407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Instantiates an OwlViTConfig from separate text and vision model configurations."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}
    @property
    def default_onnx_opset(self) -> int:
        return 14
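# Hedged usage sketch (added for illustration; this module only runs inside
# transformers because of its relative imports):
# config = OwlViTConfig()  # composes text/vision sub-configs with their defaults
# assert config.text_config.hidden_size == 512 and config.vision_config.hidden_size == 768
# combined = OwlViTConfig.from_text_vision_configs(
#     config.text_config.to_dict(), config.vision_config.to_dict())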
| 702
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
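# Hedged note (added illustration): the batch test above depends on left padding,
# which keeps the prompt tokens at matching positions in the batched and
# per-sentence calls, e.g.:
# tokenizer.padding_side = "left"
# inputs = tokenizer(sentences, return_tensors="tf", padding=True)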
| 177
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
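# Hedged note (added illustration): the `_LazyModule` indirection above defers the
# heavy torch imports, so e.g. `from transformers.models.xmod import XmodConfig`
# stays cheap until a torch-backed symbol is actually accessed.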
| 411
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Rel position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from a T5X-Flax checkpoint to a PyTorch state dict."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder").T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder").T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder").T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old, i, "decoder").T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
args = parser.parse_args()
convert_t5x_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
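# Hedged CLI sketch (added for illustration; the paths below are placeholders):
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output \
#     --scalable_attention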
| 411
| 1
|
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    """Returns the n-bit Gray code sequence as a list of integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    """Generates the n-bit Gray code sequence as a list of bit strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
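    # Hedged example (added illustration): in the 2-bit Gray code, consecutive
    # values differ in exactly one bit.
    print(gray_code(2))  # [0, 1, 3, 2]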
| 301
|
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Checks whether coloring the current vertex with `color` conflicts with a colored neighbour."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively tries to color vertex `index` and all the vertices after it."""
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Returns a valid coloring using at most `max_colors` colors, or an empty list if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
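if __name__ == "__main__":
    # Hedged example (added illustration; the adjacency matrix below is made up):
    # a 4-cycle is 2-colorable.
    cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle, 2))  # [0, 1, 0, 1]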
| 301
| 1
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 612
|
"""simple docstring"""
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Creates all n-grams (contiguous character windows) from the given sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
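    # Hedged example (added illustration):
    print(create_ngram("I am a sentence", 2))  # ['I ', ' a', 'am', 'm ', ...]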
| 281
| 0
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
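# Hedged sketch (added for illustration): the parameterized test above drives RoPE
# scaling through the config; an equivalent user-facing setup might look like this
# (values made up):
# config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
# model = OpenLlamaModel(config)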
| 643
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def a ( SCREAMING_SNAKE_CASE_ : Namespace ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand(BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand(parser: ArgumentParser ):
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=str , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=str , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing: bool , testing_file: str , path=None , *args ):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , 'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '/configuration.json' , 'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f"""{directory}/configuration.json""" )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path ):
            with open(path , 'r' ) as f:
                lines = f.readlines()
            with open(path , 'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp
        def replace(original_file: str , line_to_copy_below: str , lines_to_copy: List[str] ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , 'w' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
        replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(directory )
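# Illustrative sketch of the marker protocol that replace_in_files() consumes. The
# file name and snippet here are hypothetical examples, not real template output:
#
#   # To replace in: "src/transformers/models/foo/configuration_foo.py"
#   # Below: "    model_type = "
#   # Replace with:
#   hidden_size = 768
#   # End.
#
# Every line without "##" between "# Replace with:" and "# End." is buffered into
# lines_to_copy and written directly below the anchor line of the target file by
# the replace() helper above.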
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__( self , text: str = None , conversation_id: uuid.UUID = None , past_user_inputs=None , generated_responses=None ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__( self , other ):
        if not isinstance(other , Conversation ):
            return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input( self , text: str , overwrite: bool = False ):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    f'''with: "{text}".''' )
                self.new_user_input = text
            else:
                logger.warning(
                    f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
        else:
            self.new_user_input = text
    def mark_processed( self ):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response( self , response: str ):
        self.generated_responses.append(response )
    def iter_texts( self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
    def __repr__( self ):
        output = f'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += f'''{name} >> {text} \n'''
return output
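# Minimal usage sketch for the Conversation container above (outputs illustrative):
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation.add_user_input("Is it an action movie?")  # warns: unprocessed input exists
#   conversation.mark_processed()                          # moves text into past_user_inputs
#   conversation.append_response("The Big Lebowski")
#   print(conversation)  # Conversation id: <uuid> / user >> ... / bot >> ...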
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """ , )
class ConversationalPipeline(Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['min_length_for_response'] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['minimum_tokens'] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['max_length'] = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations , num_workers=0 , **kwargs ):
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self , conversation , min_length_for_response=32 ):
        if not isinstance(conversation , Conversation ):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
        if conversation.new_user_input is None:
            raise ValueError(
                f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
                "Add user inputs with the conversation's `add_user_input` method" )
        if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        max_length = generate_kwargs.get("max_length" , self.model.config.max_length )
        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop("conversation" )
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self , conversation ):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
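# Hedged end-to-end sketch of the pipeline above (the checkpoint name is an
# assumption; any conversational model works):
#   from transformers import pipeline
#   chatbot = pipeline("conversational" , model="microsoft/DialoGPT-medium" )
#   conversation = chatbot(Conversation("Going to the movies tonight - any suggestions?" ) )
#   print(conversation.generated_responses[-1] )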
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
return False
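# e.g. _is_chinese_char(ord('中')) -> True, _is_chinese_char(ord('A')) -> False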
def is_chinese(word ):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word(tokens ):
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens , chinese_word_set ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
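# Worked example (illustrative): with chinese_word_set = {'天气'}, the BERT tokens
# ['今', '天', '气', '好'] become ['今', '天', '##气', '好']: '天气' is matched as a
# whole word, so every token after its first character gets the '##' prefix.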
def prepare_ref(lines , ltp_tokenizer , bert_tokenizer ):
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
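# Illustrative output: if a line '我爱北京' is BERT-tokenized as ['我', '爱', '北', '京']
# and LTP segments the word '北京', add_sub_symbol yields ['我', '爱', '北', '##京'] and
# the saved ref_id for that line is [3] (positions of '##'-prefixed Chinese subwords).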
def main(args ):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune this model, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        data = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args)
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
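# e.g. floats_list((2, 3)) -> [[0.64, 0.38, 0.87], [0.15, 0.92, 0.03]] (values are
# illustrative draws from global_rng, uniform in [0, scale)).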
class ASTFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
    def test_call( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , padding=True , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , padding=True , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    @require_torch
    def test_double_precision_pad( self ):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration( self ):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1e-4 ) )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
    required_optional_params = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_inference_text2img( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_ldm_default_ddim( self ):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] )
        max_diff = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_ldm_default_ddim( self ):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
        max_diff = np.abs(expected_image - image ).max()
        assert max_diff < 1e-3
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = 'tokenizer_file'
    special_tokens_map = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
    def setUp( self ):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encodings_from_sample_data( self ):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES )['input_ids']
        self.assertListEqual(TARGET_TOKENS , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens , INPUT_SENTENCES )
    def test_padding( self , max_length=6 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
    def test_encodings_from_xnli_dataset( self ):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli' , 'all_languages' , split='test' , streaming=True )
        sample_data = next(iter(ds ) )['premise']  # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
    def test_pretrained_model_lists( self ):
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
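# Standalone round-trip sketch of the tokenizer exercised above (requires network
# access; the checkpoint name comes from setUp):
#   tok = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
#   ids = tok('The quick brown fox</s>' )['input_ids']
#   assert tok.decode(ids ) == 'The quick brown fox</s>'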
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp ):
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def is_chinese(word ):
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word(tokens ):
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens , chinese_word_set ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines , ltp_tokenizer , bert_tokenizer ):
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['cws'] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main(args ):
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        data = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase ):
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained( self ):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (GPT2Tokenizer, GPT2TokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
    def test_tokenizer_from_pretrained_identifier( self ):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def test_tokenizer_from_model_type( self ):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
    def test_tokenizer_from_tokenizer_class( self ):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def test_tokenizer_from_type( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(tmp_dir , "vocab.txt" ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="bert" , use_fast=False )
            self.assertIsInstance(tokenizer , BertTokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(tmp_dir , "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(tmp_dir , "merges.txt" ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="gpt2" , use_fast=False )
            self.assertIsInstance(tokenizer , GPT2Tokenizer )
    @require_tokenizers
    def test_tokenizer_from_type_fast( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(tmp_dir , "vocab.txt" ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="bert" )
            self.assertIsInstance(tokenizer , BertTokenizerFast )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(tmp_dir , "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(tmp_dir , "merges.txt" ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="gpt2" )
            self.assertIsInstance(tokenizer , GPT2TokenizerFast )
    def test_tokenizer_from_type_incorrect_name( self ):
        with pytest.raises(ValueError ):
            AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config( self ):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            if isinstance(tokenizer , BertTokenizer ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , False )
            else:
                self.assertEqual(tokenizer.do_lower_case , False )
            self.assertEqual(tokenizer.model_max_length , 512 )
    @require_tokenizers
    def test_tokenizer_identifier_non_existent( self ):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
                tokenizer = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
    def test_model_name_edge_cases_in_mappings( self ):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name )
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle( self ):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=False ) , BertTokenizer )
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , BertTokenizerFast )
    @require_tokenizers
    def test_do_lower_case( self ):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=False )
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample )
        self.assertEqual("[UNK]" , tokens[0] )
        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=False )
        tokens = tokenizer.tokenize(sample )
        self.assertEqual("[UNK]" , tokens[0] )
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained( self ):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
        self.assertEqual(type(tokenizer ) , PreTrainedTokenizerFast )
        self.assertEqual(tokenizer.model_max_length , 512 )
        self.assertEqual(tokenizer.vocab_size , 30000 )
        self.assertEqual(tokenizer.unk_token , "[UNK]" )
        self.assertEqual(tokenizer.padding_side , "right" )
        self.assertEqual(tokenizer.truncation_side , "right" )
    def test_auto_tokenizer_from_local_folder( self ):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir )
        self.assertIsInstance(tokenizer2 , tokenizer.__class__ )
        self.assertEqual(tokenizer2.vocab_size , 12 )
    def test_auto_tokenizer_fast_no_slow( self ):
        tokenizer = AutoTokenizer.from_pretrained("ctrl" )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer , CTRLTokenizer )
    def test_get_tokenizer_config( self ):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased" )
        _ = config.pop("_commit_hash" , None )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config , {"do_lower_case": False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER )
        self.assertDictEqual(config , {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            config = get_tokenizer_config(tmp_dir )
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
    def test_new_tokenizer_registration( self ):
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoTokenizer.register(BertConfig , slow_tokenizer_class=BertTokenizer )
            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
            self.assertIsInstance(new_tokenizer , CustomTokenizer )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration( self ):
        try:
            AutoConfig.register("custom" , CustomConfig )
            # Can register in two steps
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=CustomTokenizerFast )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig , slow_tokenizer_class=CustomTokenizer , fast_tokenizer_class=CustomTokenizerFast )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoTokenizer.register(BertConfig , fast_tokenizer_class=BertTokenizerFast )
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER )
                bert_tokenizer.save_pretrained(tmp_dir )
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
                self.assertIsInstance(new_tokenizer , CustomTokenizerFast )
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , use_fast=False )
                self.assertIsInstance(new_tokenizer , CustomTokenizer )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=False )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=True )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=True , use_fast=False )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True , use_fast=False )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict( self ):
        class NewTokenizer(BertTokenizer ):
            special_attribute_present = False
        class NewTokenizerFast(BertTokenizerFast ):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=False )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=False , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=True )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertTrue(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=True , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format( self ):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=True )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=True , use_fast=False )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier" ):
            tokenizer = AutoTokenizer.from_pretrained("bert-base" )
    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )
def test_cached_tokenizer_has_minimum_calls_to_head( self ):
# Make sure we have cached the tokenizer.
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
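# Illustrative sketch (not part of the test suite above): the minimal pattern the
# tests exercise for wiring a custom config/tokenizer pair into the Auto* classes.
# The class names below are hypothetical placeholders, not real library classes.
#
# from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer
#
# class MyConfig(PretrainedConfig):
#     model_type = "my-model"
#
# class MyTokenizer(PreTrainedTokenizer):
#     pass
#
# AutoConfig.register("my-model", MyConfig)
# AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# tokenizer = AutoTokenizer.from_pretrained("path/to/my-model")  # now resolves to MyTokenizer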
| 720
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
'''simple docstring'''
if attention_mask is None:
attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
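# For readers of the tests below: a reference sketch of what the imported
# shift_tokens_right is expected to do (an assumption-based illustration, not the
# library implementation). The target sequence is rotated right by one, the
# decoder-start token is placed in front, and masked (-100) positions become pads.
#
# def _shift_tokens_right_reference(input_ids, pad_token_id, decoder_start_token_id):
#     shifted = np.roll(input_ids, 1, axis=-1)
#     shifted[:, 0] = decoder_start_token_id
#     shifted = np.where(shifted == -100, pad_token_id, shifted)
#     return shifted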
class FlaxBlenderbotModelTester:
def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=3_2 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.initializer_range = initializer_range
def prepare_config_and_inputs( self ):
input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=True , )
inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def prepare_config_and_inputs_for_common( self ):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
max_decoder_length = 2_0
model = model_class_name(config )
encoder_outputs = model.encode(inputs_dict["input_ids"] )
decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
outputs_cache = model.decode(
decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
outputs = model.decode(decoder_input_ids , encoder_outputs )
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
max_decoder_length = 2_0
model = model_class_name(config )
encoder_outputs = model.encode(inputs_dict["input_ids"] )
decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
decoder_attention_mask_cache = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
outputs_cache = model.decode(
decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class BlenderbotHeadTests( unittest.TestCase ):
vocab_size = 9_9
def _get_config_and_data( self ):
input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.int64 , )
batch_size = input_ids.shape[0]
config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def test_lm_forward( self ):
config, input_ids, batch_size = self._get_config_and_data()
lm_model = FlaxBlenderbotForConditionalGeneration(config )
outputs = lm_model(input_ids=input_ids )
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , expected_shape )
def test_lm_uneven_forward( self ):
config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lm_model = FlaxBlenderbotForConditionalGeneration(config )
context = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.int64 )
summary = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.int64 )
outputs = lm_model(input_ids=context , decoder_input_ids=summary )
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , expected_shape )
def test_shift_tokens_right( self ):
input_ids = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.int64 )
shifted = shift_tokens_right(input_ids , 1 , 2 )
n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(n_pad_after , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
is_encoder_decoder = True
all_model_classes = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def setUp( self ):
self.model_tester = FlaxBlenderbotModelTester(self )
def test_use_cache_forward( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
def test_use_cache_forward_with_attn_mask( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def _snake_case ( self : Tuple ) -> Dict:
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__magic_name__ = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
__magic_name__ = model_class(__lowerCamelCase )
@jax.jit
def encode_jitted(__lowerCamelCase : Dict , __lowerCamelCase : str=None , **__lowerCamelCase : List[str] ):
return model.encode(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase )
with self.subTest("JIT Enabled" ):
__magic_name__ = encode_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__magic_name__ = encode_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
__magic_name__ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict ):
return model.decode(
decoder_input_ids=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , encoder_outputs=__lowerCamelCase , )
with self.subTest("JIT Enabled" ):
__magic_name__ = decode_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__magic_name__ = decode_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def test_model_from_pretrained( self ):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
input_ids = np.ones((1, 1) ) * model.config.eos_token_id
outputs = model(input_ids )
self.assertIsNotNone(outputs )
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
def test_generation_from_short_input_same_as_parlai_3B( self ):
generate_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 1_5, "max_length": 2_5}
tokenizer_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=True )
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
src_text = ["Sam"]
inputs = tokenizer(src_text , return_tensors="jax" )
generated_ids = model.generate(**inputs , **generate_kwargs )
tgt_text = "Sam is a great name. It means \"sun\" in Gaelic."
generated_txt = tokenizer.batch_decode(generated_ids.sequences , **tokenizer_kwargs )
assert generated_txt[0].strip() == tgt_text
| 468
| 0
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__UpperCAmelCase = 'scheduler_config.json'
class FlaxKarrasDiffusionSchedulers( Enum ):
FlaxDDIMScheduler = 1
FlaxDDPMScheduler = 2
FlaxPNDMScheduler = 3
FlaxLMSDiscreteScheduler = 4
FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput( BaseOutput ):
prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
config_name = SCHEDULER_CONFIG_NAME
ignore_for_config = ["""dtype"""]
_compatibles = []
has_compatibles = True
@classmethod
def from_pretrained( cls , pretrained_model_name_or_path: Dict[str, Any] = None , subfolder: Optional[str] = None , return_unused_kwargs=False , **kwargs , ):
config, kwargs = cls.load_config(
pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
scheduler, unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
if hasattr(scheduler , """create_state""" ) and getattr(scheduler , """has_state""" , False ):
state = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def save_pretrained( self , save_directory: Union[str, os.PathLike] , push_to_hub: bool = False , **kwargs ):
self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
@property
def compatibles( self ):
return self._get_compatibles()
@classmethod
def _get_compatibles( cls ):
compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
diffusers_library = importlib.import_module(__name__.split(""".""" )[0] )
compatible_classes = [
getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
]
return compatible_classes
def broadcast_to_shape_from_left( x , shape ):
'''simple docstring'''
assert len(shape ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ):
'''simple docstring'''
def alpha_bar(time_step ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
betas = []
for i in range(num_diffusion_timesteps ):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class CommonSchedulerState:
alphas: jnp.ndarray
betas: jnp.ndarray
alphas_cumprod: jnp.ndarray
@classmethod
def create( cls , scheduler ):
config = scheduler.config
if config.trained_betas is not None:
betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
betas = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
alphas = 1.0 - betas
alphas_cumprod = jnp.cumprod(alphas , axis=0 )
return cls(
alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod( state , original_samples , noise , timesteps ):
'''simple docstring'''
alphas_cumprod = state.alphas_cumprod
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common( state , original_samples , noise , timesteps ):
'''simple docstring'''
sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def get_velocity_common( state , sample , noise , timesteps ):
'''simple docstring'''
sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
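# Minimal usage sketch (illustrative; shapes and the timestep value are assumed):
# build a cosine ("squaredcos_cap_v2"-style) beta schedule and noise a sample.
#
# if __name__ == "__main__":
#     betas = betas_for_alpha_bar(1000)
#     alphas = 1.0 - betas
#     state = CommonSchedulerState(alphas=alphas, betas=betas, alphas_cumprod=jnp.cumprod(alphas, axis=0))
#     sample = jnp.zeros((1, 3, 8, 8))
#     noise = jnp.ones((1, 3, 8, 8))
#     noisy = add_noise_common(state, sample, noise, jnp.array([10]))  # shape (1, 3, 8, 8)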
| 600
|
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
if "model" in orig_key:
orig_key = orig_key.replace('model.' , '' )
if "norm1" in orig_key:
orig_key = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
if "norm2" in orig_key:
orig_key = orig_key.replace('norm2' , 'output.LayerNorm' )
if "norm" in orig_key:
orig_key = orig_key.replace('norm' , 'LayerNorm' )
if "transformer" in orig_key:
layer_num = orig_key.split('.' )[0].split('_' )[-1]
orig_key = orig_key.replace(f'transformer_{layer_num}' , f'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
orig_key = orig_key.replace('mha.attn' , 'attention.self' )
if "mha" in orig_key:
orig_key = orig_key.replace('mha' , 'attention' )
if "W_q" in orig_key:
orig_key = orig_key.replace('W_q' , 'self.query' )
if "W_k" in orig_key:
orig_key = orig_key.replace('W_k' , 'self.key' )
if "W_v" in orig_key:
orig_key = orig_key.replace('W_v' , 'self.value' )
if "ff1" in orig_key:
orig_key = orig_key.replace('ff1' , 'intermediate.dense' )
if "ff2" in orig_key:
orig_key = orig_key.replace('ff2' , 'output.dense' )
if "ff" in orig_key:
orig_key = orig_key.replace('ff' , 'output.dense' )
if "mlm_class" in orig_key:
orig_key = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
if "mlm" in orig_key:
orig_key = orig_key.replace('mlm' , 'cls.predictions.transform' )
if "cls" not in orig_key:
orig_key = 'yoso.' + orig_key
return orig_key
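# Worked example of the renaming above (derived by tracing the rules, not taken
# from a real checkpoint): the original key
#   "model.transformer_0.mha.W_q.weight"
# is stripped of "model.", gets "transformer_0" -> "encoder.layer.0",
# "mha" -> "attention", "W_q" -> "self.query", and finally the "yoso." prefix:
#   "yoso.encoder.layer.0.attention.self.query.weight"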
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
orig_state_dict[rename_key(key )] = val
orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model_state_dict']
config = YosoConfig.from_json_file(yoso_config_file )
model = YosoForMaskedLM(config )
new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
print(model.load_state_dict(new_state_dict ) )
model.eval()
model.save_pretrained(pytorch_dump_path )
print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
snake_case_ : List[Any] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 195
| 0
|
from __future__ import annotations
def all_unique( collection: list ) -> bool:
return len(set(collection ) ) == len(collection )
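# Examples (illustrative; the set collapses duplicates, so the lengths differ
# exactly when some element repeats):
#   all_unique([1, 2, 3]) -> True
#   all_unique([1, 2, 2]) -> False
#   all_unique([])        -> True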
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170
|
def cramers_rule_2x2( equation1: list , equation2: list ) -> tuple[float, float]:
# Check if the input is valid
if not len(equation1 ) == len(equation2 ) == 3:
raise ValueError('Please enter a valid equation.' )
if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
a1, b1, c1 = equation1
a2, b2, c2 = equation2
# Calculate the determinants of the matrices
determinant = a1 * b2 - a2 * b1
determinant_x = c1 * b2 - c2 * b1
determinant_y = a1 * c2 - a2 * c1
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
x = determinant_x / determinant
y = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
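# Usage examples (hand-checked against the determinant formulas above):
#   cramers_rule_2x2([2, 3, 0], [5, 1, 0])   -> (0.0, 0.0)   # det_x = det_y = 0, det = -13
#   cramers_rule_2x2([11, 2, 30], [1, 0, 4]) -> (4.0, -7.0)  # x = -8 / -2, y = 14 / -2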
| 170
| 1
|
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet( hor ):
if hor == 1_28:
down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
block_out_channels = (32, 1_28, 2_56)
up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
block_out_channels = (32, 64, 1_28, 2_56)
up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
model = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
state_dict = model.state_dict()
config = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_55_36,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
hf_value_function = UNet1DModel(**config )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
state_dict[v] = state_dict.pop(k )
hf_value_function.load_state_dict(state_dict )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , "w" ) as f:
json.dump(config , f )
def value_function():
config = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 1_28, 2_56),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_55_36,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
state_dict = model
hf_value_function = UNet1DModel(**config )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
state_dict[v] = state_dict.pop(k )
hf_value_function.load_state_dict(state_dict )
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 69
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( pt_model , model_file ):
try:
with open(model_file , "rb" ) as flax_state_f:
flax_state = from_bytes(None , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(model_file ) as f:
if f.read().startswith("version" ):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please"
" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
" folder you cloned." )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
is_type_bf_16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
if any(is_type_bf_16 ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
flax_state = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
__A : Union[str, Any] = ""
flax_state_dict = flatten_dict(flax_state , sep="." )
pt_model_dict = pt_model.state_dict()
# keep track of unexpected & missing keys
unexpected_keys = []
missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
flax_key_tuple_array = flax_key_tuple.split("." )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
flax_tensor = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
flax_key_tuple_array[i] = (
flax_key_tuple_string.replace("_0" , ".0" )
.replace("_1" , ".1" )
.replace("_2" , ".2" )
.replace("_3" , ".3" )
.replace("_4" , ".4" )
.replace("_5" , ".5" )
.replace("_6" , ".6" )
.replace("_7" , ".7" )
.replace("_8" , ".8" )
.replace("_9" , ".9" )
)
flax_key = ".".join(flax_key_tuple_array )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
# remove from missing keys
missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(flax_key )
pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
missing_keys = list(missing_keys )
if len(unexpected_keys ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
if len(missing_keys ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
" use it for predictions and inference." )
return pt_model
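# Why the (3, 2, 0, 1) transpose above: Flax stores conv kernels as HWIO
# (height, width, in_channels, out_channels) while PyTorch expects OIHW.
# A tiny self-contained check (illustrative; numpy is already imported as np):
#
# hwio = np.zeros((3, 3, 16, 32))            # Flax conv kernel layout
# oihw = np.transpose(hwio, (3, 2, 0, 1))    # -> (32, 16, 3, 3), PyTorch layout
# assert oihw.shape == (32, 16, 3, 3)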
| 520
| 0
|
'''simple docstring'''
def text_justification( word: str , max_width: int ) -> list:
'''simple docstring'''
words = word.split()
def justify(line: list , width: int , max_width: int ) -> str:
overall_spaces_count = max_width - width
words_count = len(line )
if len(line ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
spaces_to_insert_between_words = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
num_spaces_between_words_list = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
spaces_count_in_locations = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(spaces_count_in_locations ):
num_spaces_between_words_list[i] += 1
aligned_words_list = []
for i in range(spaces_to_insert_between_words ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(aligned_words_list )
answer = []
line: list[str] = []
width = 0
for word in words:
if width + len(word ) + len(line ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(word )
width += len(word )
else:
# justify the line and add it to result
answer.append(justify(line , width , max_width ) )
# reset new line and new width
line, width = [word], len(word )
remaining_spaces = max_width - width - len(line )
answer.append(' '.join(line ) + (remaining_spaces + 1) * ' ' )
return answer
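# Example output (from the upstream doctests of this algorithm; treat as illustrative):
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']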
if __name__ == "__main__":
from doctest import testmod
testmod()
| 708
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
'''simple docstring'''
config_cls = LEDConfig
config_updates = {}
hidden_act = "gelu"
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : List[Any] = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Tuple = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = eos_token_id
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Tuple = bos_token_id
SCREAMING_SNAKE_CASE : Optional[int] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
SCREAMING_SNAKE_CASE : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
SCREAMING_SNAKE_CASE : Optional[int] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : str = tf.concat(
[tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , )
SCREAMING_SNAKE_CASE : Optional[int] = global_attention_mask
return config, inputs_dict
def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
model = TFLEDModel(config=config ).get_decoder()
input_ids = inputs_dict['input_ids']
input_ids = input_ids[:1, :]
attention_mask = inputs_dict['attention_mask'][:1, :]
self.batch_size = 1
# first forward pass
outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
output, past_key_values = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
'''simple docstring'''
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
snake_case__ : List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
snake_case__ : Union[str, Any] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
snake_case__ : List[Any] = True
snake_case__ : Tuple = False
snake_case__ : Dict = False
snake_case__ : int = False
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Optional[int] = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowercase__ )
def _UpperCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.zeros_like(inputs_dict['attention_mask'] )
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.seq_length
SCREAMING_SNAKE_CASE : str = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase__ ):
SCREAMING_SNAKE_CASE : List[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = model_class(lowercase__ )
SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
SCREAMING_SNAKE_CASE : List[str] = len(lowercase__ )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_decoder_attentions_output(lowercase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Tuple = model_class(lowercase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
self.assertEqual(model.config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def _UpperCamelCase ( self ) -> Tuple:
pass
def _UpperCamelCase ( self ) -> Tuple:
# TODO: Head-masking not yet implement
pass
def _long_tensor( tok_lst ):
'''simple docstring'''
return tf.constant(tok_lst , dtype=tf.int32 )
_lowerCAmelCase :Union[str, Any] = 1E-4
@slow
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Any = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
SCREAMING_SNAKE_CASE : Optional[Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE : List[str] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE : Any = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = model(**lowercase__ )[0]
SCREAMING_SNAKE_CASE : str = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : str = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
SCREAMING_SNAKE_CASE : List[Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE : str = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : str = model(**lowercase__ )[0]
SCREAMING_SNAKE_CASE : Optional[int] = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
| 179
| 0
|
'''simple docstring'''
MOD_ADLER = 65521
def adler32( plain_text: str ) -> int:
a = 1
b = 0
for plain_chr in plain_text:
a = (a + ord(plain_chr )) % MOD_ADLER
b = (b + a) % MOD_ADLER
return (b << 16) | a
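# Quick cross-check against the classic example: the Adler-32 of "Wikipedia"
# is 300286872 (0x11E60398); zlib's reference implementation must agree.
if __name__ == "__main__":
import zlib
assert adler32("Wikipedia" ) == 300286872 == zlib.adler32(b"Wikipedia" )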
| 5
|
from __future__ import annotations
def prime_sieve( limit: int ) -> list[int]:
is_prime = [True] * limit
is_prime[0] = False
is_prime[1] = False
is_prime[2] = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
index = i * 2
while index < limit:
is_prime[index] = False
index = index + i
primes = [2]
for i in range(3 , limit , 2 ):
if is_prime[i]:
primes.append(i )
return primes
def solution( ceiling: int = 1_00_00_00 ) -> int:
primes = prime_sieve(ceiling )
length = 0
largest = 0
for i in range(len(primes ) ):
for j in range(i + length , len(primes ) ):
sol = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
length = j - i
largest = sol
return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
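# Sanity check for a small ceiling (hand-verifiable): below 100 the longest run of
# consecutive primes summing to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41, so
# solution(100) should return 41.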
| 346
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius( number: int ) -> int:
factors = prime_factors(number )
if is_square_free(factors ):
return -1 if len(factors ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
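# Examples (by the definition of the Möbius function):
#   mobius(24) -> 0   # 24 = 2^3 * 3 is not square-free
#   mobius(15) -> 1   # 3 * 5, an even number (2) of distinct prime factors
#   mobius(30) -> -1  # 2 * 3 * 5, an odd number (3) of distinct prime factors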
| 346
| 1
|
'''simple docstring'''
def solution( numerator: int = 1 , digit: int = 1000 ):
'''simple docstring'''
the_digit = 1
longest_list_length = 0
for divide_by_number in range(numerator , digit + 1 ):
has_been_divided: list[int] = []
now_divide = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(has_been_divided ):
longest_list_length = len(has_been_divided )
the_digit = divide_by_number
else:
has_been_divided.append(now_divide )
now_divide = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18
|
"""simple docstring"""
def search( list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
'''simple docstring'''
right = right or len(list_data ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(list_data , key , left + 1 , right - 1 )
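# Examples (this is a two-ended recursive *linear* search, so the list need not be sorted):
#   search([0, 5, 7, 10, 15], 5)  -> 1
#   search([0, 5, 7, 10, 15], 15) -> 4   # matched at the right end on the first call
#   search([5], -1)               -> -1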
if __name__ == "__main__":
import doctest
doctest.testmod()
| 265
| 0
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search( graph: dict, start: str ) -> set:
explored, stack = set(start ), [start]
while stack:
v = stack.pop()
explored.add(v )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(adj )
return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 691
|
'''simple docstring'''
def set_bit( number: int, position: int ):
return number | (1 << position)
def clear_bit( number: int, position: int ):
return number & ~(1 << position)
def flip_bit( number: int, position: int ):
return number ^ (1 << position)
def is_bit_set( number: int, position: int ):
return ((number >> position) & 1) == 1
def get_bit( number: int, position: int ):
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
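# Examples (illustrative, hand-checked):
#   set_bit(0b1100, 1)    -> 14  (0b1110)
#   clear_bit(0b1110, 1)  -> 12  (0b1100)
#   flip_bit(0b1101, 1)   -> 15  (0b1111)
#   is_bit_set(0b1010, 1) -> True
#   get_bit(0b0110, 2)    -> 1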
| 691
| 1
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    """Raised when the distributed sanity check fails."""

    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    # NOTE: the argument types below are assumptions; the originals were lost
    # in obfuscation.
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
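# Hedged launch sketch (the launcher flags below are assumptions, not from the
# source): run e.g. ``torchrun --nproc_per_node=2 this_script.py --streaming True``;
# every rank then verifies it received its share of the
# NUM_SHARDS * NUM_ITEMS_PER_SHARD = 12 examples.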
| 223
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 223
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            # the original indexed input_values[0] here again, which looks like
            # a typo; the padded tail of the *second* example is what should be ~0
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 720
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function; timm initializes the backbone itself.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
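# Hedged usage sketch (the model name and the availability of timm are
# assumptions, not from the source):
# config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
# backbone = TimmBackbone(config)
# outputs = backbone(torch.randn(1, 3, 224, 224))
# print([fm.shape for fm in outputs.feature_maps])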
| 29
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_0_0_0_0_0_0) -> int:
    """Project Euler 10: sum of all primes below ``n``."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'''{solution() = }''')
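    # Hedged spot check: the primes below 10 are 2, 3, 5 and 7, so their sum is 17.
    assert solution(10) == 17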
| 502
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
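if __name__ == "__main__":
    # Hedged sketch (assumes this runs inside the transformers package so the
    # relative imports resolve): instantiate the default, CANINE-S-like config.
    config = CanineConfig()
    print(config.hidden_size, config.downsampling_rate, config.num_hash_buckets)  # 768 4 16384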
| 18
|
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11 (1 -> 3 -> 4)
    print(graph.show_min(0, 3))  # 16 (0 -> 2 -> 3)
| 18
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping of utf-8 bytes to unicode strings that the BPE code
    can work with, avoiding whitespace/control characters."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
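# Hedged usage sketch (checkpoint id assumed to still be available on the Hub):
# tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
# tokenizer("Hello world")["input_ids"]  # [0, 31414, 232, 2]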
| 58
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-processes a single flax key tuple and tensor into the HF layout."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # NOTE: the key transformation originally applied on this line was
        # lost in obfuscation; keeping the key unchanged is an assumption.
        new_current_block[k] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """Hedged end-to-end check: convert, reload and generate with switch-base-8."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 631
| 0
|
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
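if __name__ == "__main__":
    # Hedged check of the masked mean pooling used in ``forward`` above:
    # only unmasked positions contribute to the averaged embedding.
    embs = torch.ones(1, 4, 2)
    attention_mask = torch.tensor([[1, 1, 0, 0]])
    pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
    print(pooled)  # tensor([[1., 1.]])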
| 715
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
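if __name__ == "__main__":
    # Hedged sketch (assumes this runs inside the ``datasets`` package so the
    # relative imports resolve): the template maps dataset columns onto the
    # canonical task schema.
    template = QuestionAnsweringExtractive(question_column="q", context_column="ctx")
    print(template.column_mapping)  # {'q': 'question', 'ctx': 'context', 'answers': 'answers'}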
| 17
| 0
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 337
|
"""simple docstring"""
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
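# Hedged usage sketch of the API re-exported above (the checkpoint id is an
# assumption, not from the source):
# from diffusers import StableDiffusionPipeline
# pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# image = pipe("an astronaut riding a horse").images[0]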
| 337
| 1
|
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 374
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow's C++ logging noise
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
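# Usage sketch (assumption: the report script above is saved as dump_env_info.py;
# capturing its stdout programmatically for a bug report):
import subprocess

env_report = subprocess.run([sys.executable, "dump_env_info.py"], capture_output=True, text=True)
print(env_report.stdout)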
| 374
| 1
|
"""simple docstring"""
def upper(word: str) -> str:
    """
    Convert the entire string to uppercase letters.

    >>> upper("wow")
    'WOW'
    >>> upper("hello world")
    'HELLO WORLD'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
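# Usage sketch for the helper above (assumption: ASCII-only input, since the
# `ord(char) - 32` trick only covers the basic Latin lowercase range):
assert upper("hello world") == "HELLO WORLD"
assert upper("123 abc!") == "123 ABC!"  # non-lowercase characters pass through unchanged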
| 530
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq checkpoint and normalize its state dict for OPTModel."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has the QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the original weights into the Hugging Face OPT structure."""
    sd = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(sd)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
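# Example invocation (assumption: the script and checkpoint file names below are
# placeholders; only --fairseq_path is required):
#   python convert_opt_checkpoint.py --fairseq_path ./restored.pt --pytorch_dump_folder_path ./opt-hf
# The QKV handling in load_checkpoint boils down to torch.split along dim 0; a toy check:
import torch

qkv = torch.randn(3 * 4, 4)  # stacked K,V,Q projection weight (toy sizes)
k, v, q = torch.split(qkv, qkv.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (4, 4)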
| 534
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
(F"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the Hugging Face BEiT structure."""
    # self-supervised checkpoints come with a language-modeling head; RVL-CDIP ones are classifiers
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
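# Example invocation (assumption: the script file name is a placeholder; the default
# --checkpoint_url already points at the DiT-base self-supervised checkpoint):
#   python convert_dit_checkpoint.py --pytorch_dump_folder_path ./dit-base
# Programmatic equivalent, commented out here to avoid a network download:
# convert_dit_checkpoint(
#     "https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
#     "./dit-base",
#     push_to_hub=False,
# )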
| 418
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
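# A minimal sketch of the lazy-import pattern the module above relies on
# (assumption: this is an illustration, not transformers' actual _LazyModule code):
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only on first attribute access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")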
| 418
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
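# The inputs_to_logits_ratio property above is simply the product of the conv
# strides, i.e. the total downsampling factor of the feature extractor. Quick
# check with the default stride configuration:
import functools
import operator

default_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 320  # one logit frame per 320 input samples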
| 376
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num_sub_block = r"\b\d{2}\b"
        if bool(re.search(two_digit_num_sub_block, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
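# Example invocation (assumption: paths below are placeholders; push_to_hub
# defaults to True here, so pass --no-push_to_hub for a local-only conversion):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path ./efficientformer_l1_300d.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1 --no-push_to_hub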
| 376
| 1
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Run `self._hf_hook.pre_forward` (when present) before calling the wrapped method."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
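# Usage sketch (assumption: TinyModel is a toy class standing in for a diffusers
# model; the hook only fires when accelerate >= 0.17.0 attached a `_hf_hook`):
class TinyModel:
    @apply_forward_hook
    def encode(self, x):
        return x + 1


print(TinyModel().encode(1))  # -> 2; no _hf_hook present, so the wrapper is a no-op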
| 707
|
def counting_sort(collection):
    """Pure implementation of the counting sort algorithm in Python."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of an ASCII string via counting_sort."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
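# counting_sort runs in O(n + k) time and space, where k = max - min + 1; it is
# stable, which is what makes the string variant above work. Quick checks:
assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert counting_sort([-2, -5, -45]) == [-45, -5, -2]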
| 333
| 0
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda tensor: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda tokenizer: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button('''Show me!'''):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>",
                    unsafe_allow_html=True,
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
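# Running the demo (assumptions: this file is saved as eli5_app.py next to
# eli5_utils.py, the precomputed .dat index files exist locally, and an
# Elasticsearch instance is listening on localhost:9200):
#   streamlit run eli5_app.py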
| 339
|
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
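# A self-contained sketch of the env-flag parsing used for RUN_SLOW above
# (assumption: illustrative re-implementation mirroring parse_flag_from_env,
# with a distinct name and env var so it does not interfere with the real one):
import os
from distutils.util import strtobool


def parse_bool_env(key, default=False):
    try:
        return bool(strtobool(os.environ[key]))
    except KeyError:
        return default


os.environ["MY_RUN_SLOW"] = "yes"
assert parse_bool_env("MY_RUN_SLOW") is True
assert parse_bool_env("SOME_UNSET_FLAG") is False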
| 506
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['labels'])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue', 'mrpc')
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split('epoch_')[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print('resumed checkpoint performance:', accuracy)
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0])
        accelerator.print('resumed optimizers\'s lr:', optimizer.param_groups[0]['lr'])
        with open(os.path.join(args.output_dir, F'''state_{starting_epoch-1}.json'''), 'r') as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = F'''epoch_{epoch}'''
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['overall_step'] = overall_step
        accelerator.print(F'''epoch {epoch}:''', state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, F'''state_{epoch}.json'''), 'w') as f:
                json.dump(state, f)
def lowerCamelCase_ ( ) -> Dict:
'''simple docstring'''
A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=lowerCAmelCase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowerCAmelCase__ , )
parser.add_argument(
'--output_dir' , type=lowerCAmelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=lowerCAmelCase__ , default=2 , help='Number of train epochs.' , )
args = parser.parse_args()
config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(config , args )
if __name__ == "__main__":
main()
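# Hedged sketch (illustrative only; the helper names below are assumptions, not
# part of the script above): the per-epoch state-file round trip that the
# resume assertions depend on. `os` and `json` are already imported here.
def _write_epoch_state(output_dir, epoch, state):
    # Persist the tracked metrics next to the accelerator checkpoint folders.
    with open(os.path.join(output_dir, f"state_{epoch}.json"), "w") as f:
        json.dump(state, f)
def _read_epoch_state(output_dir, epoch):
    # Load the metrics back when resuming, mirroring the assertions above.
    with open(os.path.join(output_dir, f"state_{epoch}.json")) as f:
        return json.load(f)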
| 709
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__ :
def __init__( self : Any , parent : str , batch_size : Any=13 , seq_length : Optional[Any]=7 , is_training : List[str]=True , use_input_mask : Any=True , use_token_type_ids : List[str]=True , use_labels : Union[str, Any]=True , vocab_size : int=99 , hidden_size : Any=32 , num_hidden_layers : int=2 , num_attention_heads : Tuple=4 , intermediate_size : Any=37 , hidden_act : List[Any]="gelu" , hidden_dropout_prob : Union[str, Any]=0.1 , attention_probs_dropout_prob : List[str]=0.1 , max_position_embeddings : Any=512 , type_vocab_size : Tuple=16 , type_sequence_label_size : int=2 , initializer_range : Union[str, Any]=0.02 , num_labels : Optional[Any]=3 , num_choices : Any=4 , scope : List[str]=None , ) -> Union[str, Any]:
    self.parent = parent
    self.batch_size = 13
    self.seq_length = 7
    self.is_training = True
    self.use_input_mask = True
    self.use_token_type_ids = True
    self.use_labels = True
    self.vocab_size = 99
    self.hidden_size = 384
    self.num_hidden_layers = 2
    self.num_attention_heads = 4
    self.intermediate_size = 37
    self.hidden_act = 'gelu'
    self.hidden_dropout_prob = 0.1
    self.attention_probs_dropout_prob = 0.1
    self.max_position_embeddings = 512
    self.type_vocab_size = 16
    self.type_sequence_label_size = 2
    self.initializer_range = 0.02
    self.num_labels = 3
    self.num_choices = 4
    self.embedding_size = 128
    self.head_ratio = 2
    self.conv_kernel_size = 9
    self.num_groups = 1
    self.scope = None
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
    input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
    token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
    sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
    token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
    choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = ConvBertConfig(
    vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Union[str, Any] , config : List[Any] , input_ids : List[str] , token_type_ids : int , input_mask : Optional[int] , sequence_labels : Dict , token_labels : Optional[Any] , choice_labels : Union[str, Any] ) -> Union[str, Any]:
    model = TFConvBertModel(config=config )
    inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
    result = model(inputs )
    inputs = [input_ids, input_mask]
    result = model(inputs )
    self.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Optional[int] , config : int , input_ids : List[Any] , token_type_ids : Dict , input_mask : Optional[Any] , sequence_labels : Optional[int] , token_labels : Dict , choice_labels : Optional[Any] ) -> str:
    model = TFConvBertForMaskedLM(config=config )
    inputs = {
        'input_ids': input_ids,
        'attention_mask': input_mask,
        'token_type_ids': token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Dict , config : Tuple , input_ids : Union[str, Any] , token_type_ids : List[Any] , input_mask : Any , sequence_labels : Tuple , token_labels : str , choice_labels : List[str] ) -> Tuple:
    config.num_labels = self.num_labels
    model = TFConvBertForSequenceClassification(config=config )
    inputs = {
        'input_ids': input_ids,
        'attention_mask': input_mask,
        'token_type_ids': token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Union[str, Any] , config : Tuple , input_ids : List[str] , token_type_ids : Any , input_mask : Union[str, Any] , sequence_labels : Optional[int] , token_labels : Union[str, Any] , choice_labels : Optional[Any] ) -> Tuple:
    config.num_choices = self.num_choices
    model = TFConvBertForMultipleChoice(config=config )
    multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
    multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
    multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
    inputs = {
        'input_ids': multiple_choice_inputs_ids,
        'attention_mask': multiple_choice_input_mask,
        'token_type_ids': multiple_choice_token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[Any] , config : Optional[Any] , input_ids : Any , token_type_ids : List[Any] , input_mask : Any , sequence_labels : int , token_labels : int , choice_labels : int ) -> Any:
    config.num_labels = self.num_labels
    model = TFConvBertForTokenClassification(config=config )
    inputs = {
        'input_ids': input_ids,
        'attention_mask': input_mask,
        'token_type_ids': token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Any , config : Optional[int] , input_ids : Union[str, Any] , token_type_ids : Optional[Any] , input_mask : Union[str, Any] , sequence_labels : int , token_labels : Tuple , choice_labels : Union[str, Any] ) -> str:
    model = TFConvBertForQuestionAnswering(config=config )
    inputs = {
        'input_ids': input_ids,
        'attention_mask': input_mask,
        'token_type_ids': token_type_ids,
    }
    result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Any ) -> List[str]:
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : Tuple = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : int = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Optional[int] = False
A_ : Any = False
A_ : str = False
def __UpperCamelCase ( self : int ) -> Any:
self.model_tester = TFConvBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Dict ) -> str:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def __UpperCamelCase ( self : str ) -> List[str]:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def __UpperCamelCase ( self : Optional[Any] ) -> int:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config , 'use_cache' ):
    config.use_cache = True
encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
model = model_class(config )
num_out = len(model(class_inputs_dict ) )
with tempfile.TemporaryDirectory() as tmpdirname:
    model.save_pretrained(tmpdirname , saved_model=True )
    saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1' )
    model = tf.keras.models.load_model(saved_model_dir )
    outputs = model(class_inputs_dict )
    if self.is_encoder_decoder:
        output_hidden_states = outputs['encoder_hidden_states']
        output_attentions = outputs['encoder_attentions']
    else:
        output_hidden_states = outputs['hidden_states']
        output_attentions = outputs['attentions']
    self.assertEqual(len(outputs ) , num_out )
    expected_num_layers = getattr(
        self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
    self.assertEqual(len(output_hidden_states ) , expected_num_layers )
    self.assertListEqual(
        list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
    self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __UpperCamelCase ( self : str ) -> str:
model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(model )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length )
encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
def check_decoder_attentions_output(outputs ):
    out_len = len(outputs )
    self.assertEqual(out_len % 2 , 0 )
    decoder_attentions = outputs.decoder_attentions
    self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(outputs ):
    attentions = [
        t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
    ]
    self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
inputs_dict['output_attentions'] = True
config.output_hidden_states = False
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
out_len = len(outputs )
self.assertEqual(config.output_hidden_states , False )
check_encoder_attentions_output(outputs )
if self.is_encoder_decoder:
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(config.output_hidden_states , False )
check_decoder_attentions_output(outputs )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(config.output_hidden_states , False )
check_encoder_attentions_output(outputs )
# Check attention is always last and order is fine
inputs_dict['output_attentions'] = True
config.output_hidden_states = True
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
self.assertEqual(model.config.output_hidden_states , True )
check_encoder_attentions_output(outputs )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
expected_shape = [1, 6, 768]
self.assertEqual(output.shape , expected_shape )
expected_slice = tf.constant(
    [
        [
            [-0.03475493, -0.4686034, -0.30638832],
            [0.22637248, -0.26988646, -0.7423424],
            [0.10324868, -0.45013508, -0.58280784],
        ]
    ] )
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
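# Hedged sketch (illustrative; the helper name is an assumption): the
# slice-comparison pattern used by the integration test above, factored out.
def _assert_slice_close(output, expected_slice, atol=1e-4):
    # Only the top-left 3x3 corner of the first batch element is checked, which
    # is enough to catch numerical regressions without storing full tensors.
    tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=atol)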
| 224
| 0
|
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=a ):
A__ : Any = ['transformers', 'torch', 'note_seq']
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def lowerCAmelCase( cls : Tuple , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Dict ):
"""simple docstring"""
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def lowerCAmelCase( cls : Dict , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Any ):
"""simple docstring"""
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
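# Hedged usage note (illustrative): instantiating this placeholder class, or
# calling a classmethod such as `from_pretrained`, raises an ImportError naming
# the missing backends (transformers, torch, note_seq) instead of failing at
# import time.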
| 598
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=a )
class a_ ( a ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
A__ : str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
A__ : ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} )
A__ : ClassVar[Features] = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
A__ : str = "question"
A__ : str = "context"
A__ : str = "answers"
@property
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
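# Hedged usage note (illustrative; dataset and column names are assumptions):
# the column mapping above lets
# dataset.prepare_for_task('question-answering-extractive') rename arbitrary
# source columns to the canonical "question"/"context"/"answers" schema before
# training.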
| 598
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : List[Any] = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
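# Hedged note (illustrative): installing the _LazyModule in sys.modules keeps
# `import` of this package cheap; heavy submodules such as modeling_mvp are
# only imported when an attribute like `MvpModel` is first accessed.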
| 700
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_lowerCamelCase : Dict = logging.get_logger(__name__)
@dataclass
class lowercase :
'''simple docstring'''
UpperCAmelCase : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
UpperCAmelCase : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
UpperCAmelCase : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def __post_init__( self : Union[str, Any] ):
    '''simple docstring'''
    self.task_name = self.task_name.lower()
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : List[Any] = 'train'
UpperCAmelCase : Optional[Any] = 'dev'
UpperCAmelCase : Optional[int] = 'test'
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : GlueDataTrainingArguments
UpperCAmelCase : str
UpperCAmelCase : List[InputFeatures]
def __init__( self : Union[str, Any] , args : GlueDataTrainingArguments , tokenizer : PreTrainedTokenizerBase , limit_length : Optional[int] = None , mode : Union[str, Split] = Split.train , cache_dir : Optional[str] = None , ):
'''simple docstring'''
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , FutureWarning , )
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode , str ):
    try:
        mode = Split[mode]
    except KeyError:
        raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + '.lock'
with FileLock(lock_path ):
    if os.path.exists(cached_features_file ) and not args.overwrite_cache:
        start = time.time()
        self.features = torch.load(cached_features_file )
        logger.info(
            f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
    examples = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
    examples = self.processor.get_test_examples(args.data_dir )
else:
    examples = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
    examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(
    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
start = time.time()
torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : int ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict , i : Optional[int] ):
    '''simple docstring'''
    return self.features[i]
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.label_list
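# Hedged sketch (illustrative; the helper name is an assumption): the
# cache-or-build pattern used by __init__ above, reduced to its core.
def _cached_features(cached_features_file, build_fn, overwrite_cache=False):
    # The lock ensures only the first distributed process builds the cache;
    # the others block on it and then read the saved tensor file.
    with FileLock(cached_features_file + ".lock"):
        if os.path.exists(cached_features_file) and not overwrite_cache:
            return torch.load(cached_features_file)
        features = build_fn()
        torch.save(features, cached_features_file)
        return features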
| 308
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = AudioLDMPipeline
UpperCamelCase_ = TEXT_TO_AUDIO_PARAMS
UpperCamelCase_ = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCamelCase_ = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def A__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
unet = UNetaDConditionModel(
    block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=True , )
scheduler = DDIMScheduler(
    beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
    block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = ClapTextConfig(
    bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
text_encoder = ClapTextModelWithProjection(text_encoder_config )
tokenizer = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
vocoder_config = SpeechTaHifiGanConfig(
    model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=False , )
vocoder = SpeechTaHifiGan(vocoder_config )
components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def A__ ( self : List[Any] , device : Any , seed : Optional[Any]=0 ) -> Union[str, Any]:
    '''simple docstring'''
    if str(device ).startswith('''mps''' ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def A__ ( self : str ) -> Dict:
'''simple docstring'''
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
output = audioldm_pipe(**inputs )
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio ) == 256
audio_slice = audio[:10]
expected_slice = np.array(
    [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
components = self.get_dummy_components()
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(torch_device )
prompt = 3 * [inputs['''prompt''']]
# forward
output = audioldm_pipe(**inputs )
audio_a = output.audios[0]
inputs = self.get_dummy_inputs(torch_device )
prompt = 3 * [inputs.pop('''prompt''' )]
text_inputs = audioldm_pipe.tokenizer(
    prompt , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
text_inputs = text_inputs['''input_ids'''].to(torch_device )
prompt_embeds = audioldm_pipe.text_encoder(
    text_inputs , )
prompt_embeds = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
prompt_embeds = F.normalize(prompt_embeds , dim=-1 )
inputs['''prompt_embeds'''] = prompt_embeds
# forward
output = audioldm_pipe(**inputs )
audio_b = output.audios[0]
assert np.abs(audio_a - audio_b ).max() < 1e-2
def A__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
components = self.get_dummy_components()
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(torch_device )
negative_prompt = 3 * ['''this is a negative prompt''']
inputs['''negative_prompt'''] = negative_prompt
prompt = 3 * [inputs['''prompt''']]
# forward
output = audioldm_pipe(**inputs )
audio_a = output.audios[0]
inputs = self.get_dummy_inputs(torch_device )
prompt = 3 * [inputs.pop('''prompt''' )]
embeds = []
for p in [prompt, negative_prompt]:
    text_inputs = audioldm_pipe.tokenizer(
        p , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
    text_inputs = text_inputs['''input_ids'''].to(torch_device )
    text_embeds = audioldm_pipe.text_encoder(
        text_inputs , )
    text_embeds = text_embeds.text_embeds
    # additional L_2 normalization over each hidden-state
    text_embeds = F.normalize(text_embeds , dim=-1 )
    embeds.append(text_embeds )
prompt_embeds , negative_prompt_embeds = embeds
inputs['''prompt_embeds'''] = prompt_embeds
inputs['''negative_prompt_embeds'''] = negative_prompt_embeds
# forward
output = audioldm_pipe(**inputs )
audio_b = output.audios[0]
assert np.abs(audio_a - audio_b ).max() < 1e-2
def A__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components['''scheduler'''] = PNDMScheduler(skip_prk_steps=True )
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
negative_prompt = '''egg cracking'''
output = audioldm_pipe(**inputs , negative_prompt=negative_prompt )
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio ) == 256
audio_slice = audio[:10]
expected_slice = np.array(
    [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components['''scheduler'''] = PNDMScheduler(skip_prk_steps=True )
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(device )
audioldm_pipe.set_progress_bar_config(disable=None )
prompt = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
audios = audioldm_pipe(prompt , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
batch_size = 2
audios = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
num_waveforms_per_prompt = 2
audios = audioldm_pipe(prompt , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
batch_size = 2
audios = audioldm_pipe(
    [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def A__ ( self : Tuple ) -> str:
'''simple docstring'''
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(device )
audioldm_pipe.set_progress_bar_config(disable=None )
vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
inputs = self.get_dummy_inputs(device )
output = audioldm_pipe(audio_length_in_s=0.016 , **inputs )
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio ) / vocoder_sampling_rate == 0.016
output = audioldm_pipe(audio_length_in_s=0.032 , **inputs )
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio ) / vocoder_sampling_rate == 0.032
def A__ ( self : str ) -> str:
'''simple docstring'''
components = self.get_dummy_components()
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
prompt = ['''hey''']
output = audioldm_pipe(prompt , num_inference_steps=1 )
audio_shape = output.audios.shape
assert audio_shape == (1, 256)
config = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
audioldm_pipe.vocoder = SpeechTaHifiGan(config ).to(torch_device )
output = audioldm_pipe(prompt , num_inference_steps=1 )
audio_shape = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def A__ ( self : List[Any] ) -> str:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=False )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False )
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Any ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any]="cpu" , UpperCAmelCase : Union[str, Any]=torch.floataa , UpperCAmelCase : List[str]=0 ) -> str:
'''simple docstring'''
lowercase : str =torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase : List[str] =np.random.RandomState(UpperCAmelCase ).standard_normal((1, 8, 128, 16) )
lowercase : Tuple =torch.from_numpy(UpperCAmelCase ).to(device=UpperCAmelCase , dtype=UpperCAmelCase )
lowercase : Any ={
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def A__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
audioldm_pipe = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
inputs['''num_inference_steps'''] = 25
audio = audioldm_pipe(**inputs ).audios[0]
assert audio.ndim == 1
assert len(audio ) == 81920
audio_slice = audio[77230:77240]
expected_slice = np.array(
    [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def A__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
audioldm_pipe = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
audio = audioldm_pipe(**inputs ).audios[0]
assert audio.ndim == 1
assert len(audio ) == 81920
audio_slice = audio[27780:27790]
expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
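# Hedged helper sketch (illustrative; name and signature are assumptions): the
# slice-based numerical check repeated in the slow tests above.
def _assert_audio_slice_close(audio, start, expected_slice, tol=1e-2):
    audio_slice = audio[start : start + len(expected_slice)]
    assert np.abs(expected_slice - audio_slice).max() < tol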
| 94
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav, max_length, sample_rate = 16_000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
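# Worked example (illustrative): with sample_rate=16_000 and max_length=20 the
# crop is 320_000 samples; a 25 s clip (400_000 samples) is cut at a random
# offset in [0, 79_999], while any clip of at most 20 s is returned unchanged.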
@dataclass
class _A :
"""simple docstring"""
UpperCamelCase_ : Optional[str] = field(default=None , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
UpperCamelCase_ : Optional[str] = field(
    default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase_ : Optional[str] = field(
    default=None , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
UpperCamelCase_ : Optional[str] = field(
    default=None , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
UpperCamelCase_ : str = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
UpperCamelCase_ : str = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the evaluation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
UpperCamelCase_ : str = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
UpperCamelCase_ : str = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
UpperCamelCase_ : Optional[int] = field(
default=None , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=None , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase_ : float = field(
default=2_0 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class _A :
"""simple docstring"""
UpperCamelCase_ : str = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
UpperCamelCase_ : Optional[str] = field(
default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] = field(
default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
UpperCamelCase_ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase_ : Optional[str] = field(
default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
UpperCamelCase_ : bool = field(
default=True , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
UpperCamelCase_ : bool = field(
default=True , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
UpperCamelCase_ : bool = field(
default=False , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCamelCase_ : Optional[bool] = field(
default=None , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
UpperCamelCase_ : bool = field(
default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def __post_init__( self : int ) -> Tuple:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
    # If we pass only one argument to the script and it's the path to a json file,
    # let's parse it to get our arguments.
    model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''', model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout)], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}")
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
    last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to train from scratch.''')
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''')
# Initialize our dataset and prepare it for the audio classification task.
raw_datasets = DatasetDict()
raw_datasets['''train'''] = load_dataset(
    data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
raw_datasets['''eval'''] = load_dataset(
    data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f"{', '.join(raw_datasets['train'].column_names)}.")
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f"{', '.join(raw_datasets['train'].column_names)}.")
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate))
model_input_name = feature_extractor.model_input_names[0]
def train_transforms(batch):
    subsampled_wavs = []
    for audio in batch[data_args.audio_column_name]:
        wav = random_subsample(
            audio['''array'''], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate)
        subsampled_wavs.append(wav)
    inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
    output_batch = {model_input_name: inputs.get(model_input_name)}
    output_batch['''labels'''] = list(batch[data_args.label_column_name])
    return output_batch
def val_transforms(batch):
    wavs = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
    inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
    output_batch = {model_input_name: inputs.get(model_input_name)}
    output_batch['''labels'''] = list(batch[data_args.label_column_name])
    return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
labels = raw_datasets['''train'''].features[data_args.label_column_name].names
label2id , id2label = {}, {}
for i, label in enumerate(labels):
    label2id[label] = str(i)
    id2label[str(i)] = label
# Load the accuracy metric from the datasets package
metric = evaluate.load('''accuracy''')
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(eval_pred):
    predictions = np.argmax(eval_pred.predictions, axis=1)
    return metric.compute(predictions=predictions, references=eval_pred.label_ids)
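    # Worked example (illustrative): for predictions [[0.1, 0.9], [0.8, 0.2]]
    # and label_ids [1, 0], argmax yields [1, 0] and the metric returns
    # {"accuracy": 1.0}.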
config = AutoConfig.from_pretrained(
    model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task='''audio-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
model = AutoModelForAudioClassification.from_pretrained(
    model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
raw_datasets['''train'''] = (
    raw_datasets['''train'''].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
)
# Set the training transforms
raw_datasets['''train'''].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
if data_args.max_eval_samples is not None:
raw_datasets['''eval'''] = (
    raw_datasets['''eval'''].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
raw_datasets['''eval'''].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
trainer = Trainer(
    model=model, args=training_args, train_dataset=raw_datasets['''train'''] if training_args.do_train else None, eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
    checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
    checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics)
trainer.save_metrics('''train''', train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics('''eval''', metrics)
trainer.save_metrics('''eval''', metrics)
# Write model card and (optionally) push to hub
kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 564
| 0
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_: Dict = logging.get_logger(__name__)
lowerCAmelCase_: Dict = "▁"
lowerCAmelCase_: Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowerCAmelCase_: Union[str, Any] = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowerCAmelCase_: Union[str, Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowerCAmelCase_: str = {
"ernie-m-base": 5_1_4,
"ernie-m-large": 5_1_4,
}
lowerCAmelCase_: Optional[int] = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class a__ ( _a ):
snake_case_ = ["input_ids"]
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = RESOURCE_FILES_NAMES
def __init__( self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs = None, **kwargs, ):
    '''simple docstring'''
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    super().__init__(
        do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
    self.do_lower_case = do_lower_case
    self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(sentencepiece_model_ckpt )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
    self.vocab = self.load_vocab(filepath=vocab_file )
else:
    self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def get_offset_mapping( self, text ):
    '''simple docstring'''
    if text is None:
        return None
    split_tokens = self.tokenize(text )
    normalized_text , char_mapping = "", []
    for i, ch in enumerate(text ):
        if ch in self.SP_CHAR_MAPPING:
            ch = self.SP_CHAR_MAPPING.get(ch )
        else:
            ch = unicodedata.normalize("NFKC", ch )
        if self.is_whitespace(ch ):
            continue
        normalized_text += ch
        char_mapping.extend([i] * len(ch ) )
    text , token_mapping , offset = normalized_text, [], 0
    if self.do_lower_case:
        text = text.lower()
    for token in split_tokens:
        if token[:1] == "▁":
            token = token[1:]
        start = text[offset:].index(token ) + offset
        end = start + len(token )
        token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
        offset = end
    return token_mapping
@property
def vocab_size( self ):
    '''simple docstring'''
    return len(self.vocab )
def get_vocab( self ):
    '''simple docstring'''
    return dict(self.vocab, **self.added_tokens_encoder )
def __getstate__( self ):
'''simple docstring'''
state = self.__dict__.copy()
state['''sp_model'''] = None
return state
def __setstate__( self, _UpperCAmelCase ):
'''simple docstring'''
self.__dict__ = _UpperCAmelCase
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def clean_text( self, text ):
    '''simple docstring'''
    return "".join((self.SP_CHAR_MAPPING.get(c, c ) for c in text) )
def _tokenize( self, text, enable_sampling=False, nbest_size=64, alpha=0.1 ):
    '''simple docstring'''
    if self.sp_model_kwargs.get("enable_sampling" ) is True:
        enable_sampling = True
    if self.sp_model_kwargs.get("alpha" ) is not None:
        alpha = self.sp_model_kwargs.get("alpha" )
    if self.sp_model_kwargs.get("nbest_size" ) is not None:
        nbest_size = self.sp_model_kwargs.get("nbest_size" )
    if not enable_sampling:
        pieces = self.sp_model.EncodeAsPieces(text )
    else:
        pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha )
    new_pieces = []
    for pi, piece in enumerate(pieces ):
        if piece == SPIECE_UNDERLINE:
            if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                new_pieces.append(SPIECE_UNDERLINE )
                continue
            else:
                continue
        lst_i = 0
        for i, chunk in enumerate(piece ):
            if chunk == SPIECE_UNDERLINE:
                continue
            if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                    new_pieces.append(piece[lst_i:i] )
                new_pieces.append(chunk )
                lst_i = i + 1
            elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                    new_pieces.append(piece[lst_i:i] )
                lst_i = i
            elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                    new_pieces.append(piece[lst_i:i] )
                lst_i = i
        if len(piece ) > lst_i:
            new_pieces.append(piece[lst_i:] )
    return new_pieces
    def convert_tokens_to_string( self, tokens ):
        '''simple docstring'''
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE, " " ).strip()
        return out_string
    def convert_ids_to_string( self, ids ):
        '''simple docstring'''
        tokens = self.convert_ids_to_tokens(ids )
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE, " " ).strip()
        return out_string
    def _convert_token_to_id( self, token ):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self, index ):
        '''simple docstring'''
        return self.reverse_vocab.get(index, self.unk_token )
    def build_inputs_with_special_tokens( self, token_ids_a, token_ids_b=None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_b + _sep
    def build_offset_mapping_with_special_tokens( self, offset_mapping_a, offset_mapping_b=None ):
        '''simple docstring'''
        if offset_mapping_b is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_b + [(0, 0)]
    def get_special_tokens_mask( self, token_ids_a, token_ids_b=None, already_has_special_tokens=False ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_a, token_ids_b = None ):
        '''simple docstring'''
        if token_ids_b is None:
            # [CLS] X [SEP]
            return (len(token_ids_a ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_a ) + 1) + [1] * (len(token_ids_b ) + 3)
    def is_ch_char( self, char ):
        '''simple docstring'''
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha( self, char ):
        '''simple docstring'''
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct( self, char ):
        '''simple docstring'''
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace( self, char ):
        '''simple docstring'''
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def load_vocab( self, filepath ):
        '''simple docstring'''
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8" ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip("\n" )
                token_to_idx[token] = int(index )
        return token_to_idx
    def save_vocabulary( self, save_directory, filename_prefix = None ):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model" )
        with open(tokenizer_model_file, "wb" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
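# Added illustration (hypothetical ids, not from the original file) of the pair
# layout the two builders above produce: [CLS] A [SEP] [SEP] B [SEP], with the
# token-type ids lining up element for element.
cls_id, sep_id = 1, 2
ids_a, ids_b = [10, 11], [20]
pair = [cls_id] + ids_a + [sep_id] + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)
assert len(pair) == len(token_type_ids)  # 7 == 7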
| 668
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class ByT5TokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def ta_base_tokenizer( self ):
        '''simple docstring'''
        return ByT5Tokenizer.from_pretrained("google/byt5-small" )
    def get_tokenizer( self, **kwargs ):
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs )
    def get_clean_sequence( self, tokenizer, with_prefix_space=False, max_length=20, min_length=5 ):
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R"^[ a-zA-Z]+$", t[1] ), toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1], add_special_tokens=False ), toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False )
        return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
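# Added note: ByT5 tokenizes raw UTF-8 bytes with a fixed id offset of 3
# (pad=0, </s>=1, unk=2), which is exactly what the hard-coded id lists in the
# tests above encode. A quick self-contained check of the "Unicode €." case:
expected = [b + 3 for b in "Unicode €.".encode("utf-8")] + [1]  # trailing 1 is </s>
assert expected == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]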
| 668
| 1
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key ):
    '''simple docstring'''
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func
    return decorator
def mark_multiple(*keys ):
    '''simple docstring'''
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls ):
    '''simple docstring'''
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
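# Added usage sketch (the Menu class below is hypothetical): methods tagged
# with the decorators above are collected by the KeyHandler metaclass and
# dispatched on key press.
@register
class Menu:
    @mark(ord("q"))  # key codes are ints after get_character()/ord()
    def quit(cls):
        return "quit"
# An instance call such as Menu().handle_input() blocks on get_character()
# and returns "quit" once 'q' is pressed.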
| 670
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "vivit"
    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
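# Added usage sketch (this snippet mirrors transformers' VivitConfig; the
# defaults below come straight from the signature above):
config = VivitConfig(num_frames=16, tubelet_size=[2, 16, 16])
assert config.hidden_size == 768 and config.num_frames == 16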
| 120
| 0
|
"""simple docstring"""
def solution( limit = 50_00_00_00 ) -> int:
    """simple docstring"""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"""{solution() = }""")
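    # Added worked example: the four prime power triples below fifty are
    # 28 = 2**2 + 2**3 + 2**4, 33 = 3**2 + 2**3 + 2**4,
    # 47 = 2**2 + 3**3 + 2**4, 49 = 5**2 + 2**3 + 2**4; hence solution(50) == 4.
    assert 2**2 + 2**3 + 2**4 == 28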
| 715
|
"""simple docstring"""
from copy import deepcopy
class BinaryIndexedTree :
    def __init__( self , arr = None , size = None ) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('Either arr or size must be specified' )
    def init( self , arr ) -> None:
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array( self ) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_( index ) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev( index ) -> int:
        return index - (index & (-index))
    def add( self , index , value ) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update( self , index , value ) -> None:
        self.add(index , value - self.get(index ) )
    def prefix( self , right ) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query( self , left , right ) -> int:
        return self.prefix(right ) - self.prefix(left )
    def get( self , index ) -> int:
        return self.query(index , index + 1 )
    def rank_query( self , value ) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
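    # Added usage check of the Fenwick (binary indexed) tree above:
    bit = BinaryIndexedTree(arr=[1, 2, 0, 5])
    assert bit.prefix(3) == 3      # 1 + 2 + 0
    bit.add(2, 4)                  # logical array becomes [1, 2, 4, 5]
    assert bit.query(1, 4) == 11   # 2 + 4 + 5
    assert bit.get_array() == [1, 2, 4, 5]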
| 20
| 0
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase : Union[str, Any] =logging.get_logger(__name__)
class snake_case__ (A__ ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use YolosImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 136
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
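# Added note: the guarded dict above is transformers' lazy-import pattern.
# Importing the package only registers names; the heavy submodule is loaded on
# first attribute access, e.g.:
#   import transformers.models.bloom as bloom   # cheap, nothing loaded yet
#   bloom.BloomConfig                            # triggers the real import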
| 136
| 1
|
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch :
    '''simple docstring'''
    def __init__( self , text , pattern )-> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char )-> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos )-> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self )-> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
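# Added cross-check of the bad-character heuristic against a naive scan:
def naive_positions(t: str, p: str) -> list[int]:
    return [i for i in range(len(t) - len(p) + 1) if t[i : i + len(p)] == p]
assert naive_positions(text, pattern) == positions == [0, 3]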
| 172
|
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend( line ):
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("else:" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files( backend_specific_objects=None ):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , "utils" )
    dummy_file_paths = {
        backend: os.path.join(path , f"dummy_{short_names.get(backend , backend )}_objects.py" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , "r" , encoding="utf-8" , newline="\n" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main "
                    "__init__ has new objects." )
                with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` "
                    "to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
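    # Added illustration: create_dummy_object fills the DUMMY_CLASS template
    # above; for a torch-only class name it prints roughly
    #   class UNet2DModel(metaclass=DummyObject):
    #       _backends = ["torch"]
    #       ...
    print(create_dummy_object("UNet2DModel", '["torch"]'))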
| 172
| 1
|
'''simple docstring'''
import math
import qiskit
def quantum_full_adder( input_1 = 1 , input_2 = 1 , carry_in = 1 ):
    """simple docstring"""
    if (
        isinstance(input_1 , str )
        or isinstance(input_2 , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr' )
    cr = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
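    # Added note: with classical 0/1 inputs the circuit above is deterministic;
    # 1 + 1 + 1 = 0b11, so every shot lands on bitstring "11" (carry, sum):
    #   quantum_full_adder(1, 1, 1) -> {'11': 1000}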
| 474
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty( self , results ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : Optional[int] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmark(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Any = 'sgugger/tiny-distilbert-classification'
SCREAMING_SNAKE_CASE : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , only_pretrain_model=lowercase__ , )
SCREAMING_SNAKE_CASE : Optional[Any] = PyTorchBenchmark(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : Dict = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , torchscript=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmark(lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : Any = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , fpaa=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmark(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : int = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowercase__ )
# set architectures equal to `None`
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmark(lowercase__ , configs=[config] )
SCREAMING_SNAKE_CASE : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : Tuple = PyTorchBenchmark(lowercase__ )
SCREAMING_SNAKE_CASE : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Optional[Any] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase__ , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmark(lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : Union[str, Any] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : Dict = PyTorchBenchmark(lowercase__ , configs=[config] )
SCREAMING_SNAKE_CASE : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : Any = 'sshleifer/tinier_bart'
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmark(lowercase__ , configs=[config] )
SCREAMING_SNAKE_CASE : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : List[str] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmark(lowercase__ , configs=[config] )
SCREAMING_SNAKE_CASE : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Optional[Any] = 'sshleifer/tinier_bart'
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmark(lowercase__ , configs=[config] )
SCREAMING_SNAKE_CASE : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Optional[Any] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , save_to_csv=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase__ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase__ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase__ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase__ , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase__ , 'env.csv' ) , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : Any = PyTorchBenchmark(lowercase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase__ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase__ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase__ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase__ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase__ , 'env.csv' ) ).exists() )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase__ ):
self.assertTrue(hasattr(lowercase__ , 'sequential' ) )
self.assertTrue(hasattr(lowercase__ , 'cumulative' ) )
self.assertTrue(hasattr(lowercase__ , 'current' ) )
self.assertTrue(hasattr(lowercase__ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase__ , inference=lowercase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase__ , 'log.txt' ) , log_print=lowercase__ , trace_memory_line_by_line=lowercase__ , multi_process=lowercase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = PyTorchBenchmark(lowercase__ )
SCREAMING_SNAKE_CASE : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase__ , 'log.txt' ) ).exists() )
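# Added standalone sketch of the (now-deprecated) benchmark API the tests
# above exercise, with tiny, CPU-friendly settings:
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)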
| 251
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 2_5_6
class SpectrogramDiffusionPipeline( DiffusionPipeline ):
    '''simple docstring'''
    _optional_components = ['''melgan''']
    def __init__(self ,notes_encoder ,continuous_encoder ,decoder ,scheduler ,melgan ,) -> Any:
        """simple docstring"""
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 1_28
        self.register_modules(
            notes_encoder=notes_encoder ,continuous_encoder=continuous_encoder ,decoder=decoder ,scheduler=scheduler ,melgan=melgan ,)
    def scale_features(self ,features ,output_range=(-1.0, 1.0) ,clip=False ) -> Union[str, Any]:
        """simple docstring"""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features ,self.min_value ,self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self ,outputs ,input_range=(-1.0, 1.0) ,clip=False ) -> Any:
        """simple docstring"""
        min_out, max_out = input_range
        outputs = torch.clip(outputs ,min_out ,max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self ,input_tokens ,continuous_inputs ,continuous_mask ) -> str:
        """simple docstring"""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens ,encoder_inputs_mask=tokens_mask )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs ,encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self ,encodings_and_masks ,input_tokens ,noise_time ) -> Union[str, Any]:
        """simple docstring"""
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] ,dtype=torch.long ,device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] ,dtype=timesteps.dtype ,device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks ,decoder_input_tokens=input_tokens ,decoder_noise_time=timesteps )
        return logits
    @torch.no_grad()
    def __call__(self ,input_tokens ,generator = None ,num_inference_steps = 1_00 ,return_dict = True ,output_type = "numpy" ,callback = None ,callback_steps = 1 ,) -> Tuple:
        """simple docstring"""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps ,int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] ,dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] ,np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) ,dtype=bool ,device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device ,dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) ,dtype=bool ,device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs ,output_range=[-1.0, 1.0] ,clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) ,continuous_inputs=encoder_continuous_inputs ,continuous_mask=encoder_continuous_mask ,)
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape ,generator=generator ,device=self.device ,dtype=self.decoder.dtype ,)
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks ,input_tokens=x ,noise_time=t / self.scheduler.config.num_train_timesteps ,)
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output ,t ,x ,generator=generator ).prev_sample
            mel = self.scale_to_features(x ,input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] ,axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i ,full_pred_mel )
            logger.info('''Generated segment''' ,i )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
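# Added numeric check (standalone) of the affine rescaling used by
# scale_features/scale_to_features above: mapping to [0, 1] and back is the
# identity.
_min_v, _max_v = math.log(1e-5), 4.0
_x = torch.tensor([_min_v, 0.0, _max_v])
_z = (_x - _min_v) / (_max_v - _min_v)                      # forward: to [0, 1]
assert torch.allclose(_z * (_max_v - _min_v) + _min_v, _x)  # inverse round trip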
| 707
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node :
    '''simple docstring'''
    data: int
    next_node: Node | None
class SortedLinkedList :
    '''simple docstring'''
    def __init__(self ,ints ) -> None:
        """simple docstring"""
        self.head: Node | None = None
        for i in sorted(ints ,reverse=True ):
            self.head = Node(i ,self.head )
    def __iter__(self ) -> Iterator[int]:
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self ) -> int:
        """simple docstring"""
        return sum(1 for _ in self )
    def __str__(self ) -> str:
        """simple docstring"""
        return " -> ".join([str(node ) for node in self] )
def merge_lists( sll_one: SortedLinkedList ,sll_two: SortedLinkedList ):
    '''simple docstring'''
    return SortedLinkedList(list(sll_one ) + list(sll_two ))
if __name__ == "__main__":
import doctest
doctest.testmod()
SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
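# Added note: each SortedLinkedList sorts its input on construction (inserting
# at the head in descending order yields an ascending list), so the odd tuple
# alone prints as
#   -11 -> -1 -> 0 -> 1 -> 3 -> 5 -> 7 -> 9
# and merge_lists simply re-sorts the concatenation of both inputs.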
| 90
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 77
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs( input_types ):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000 ) )
        elif isinstance(input_type ,list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''' )
    return inputs
def output_types( outputs ):
    output_types = []
    for output in outputs:
        if isinstance(output ,(str, AgentText) ):
            output_types.append("text" )
        elif isinstance(output ,(Image.Image, AgentImage) ):
            output_types.append("image" )
        elif isinstance(output ,(torch.Tensor, AgentAudio) ):
            output_types.append("audio" )
        else:
            raise ValueError(F'''Invalid output: {output}''' )
    return output_types
@is_tool_test
class _UpperCamelCase :
'''simple docstring'''
def a__ ( self ) -> Optional[Any]:
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def a__ ( self ) -> Any:
lowercase : Any = create_inputs(self.tool.inputs )
lowercase : Tuple = self.tool(*a_ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase : Any = [outputs]
self.assertListEqual(output_types(a_ ) , self.tool.outputs )
def a__ ( self ) -> List[str]:
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def a__ ( self ) -> int:
lowercase : str = create_inputs(self.tool.inputs )
lowercase : str = self.tool(*a_ )
if not isinstance(a_ , a_ ):
lowercase : Union[str, Any] = [outputs]
self.assertEqual(len(a_ ) , len(self.tool.outputs ) )
for output, output_type in zip(a_ , self.tool.outputs ):
lowercase : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a_ , a_ ) )
def a__ ( self ) -> Optional[int]:
lowercase : int = create_inputs(self.tool.inputs )
lowercase : str = []
for _input, input_type in zip(a_ , self.tool.inputs ):
if isinstance(a_ , a_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase : Optional[int] = self.tool(*a_ )
if not isinstance(a_ , a_ ):
lowercase : str = [outputs]
self.assertEqual(len(a_ ) , len(self.tool.outputs ) )
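# A minimal smoke test (added illustration, not part of the original test
# file; assumes torch is available): `output_types` classifies plain Python
# values the same way it classifies agent types.
if __name__ == "__main__":
    assert output_types(["some text"]) == ["text"]
    assert output_types([torch.ones(10)]) == ["audio"]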
'''simple docstring'''
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
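# A small self-contained check (added here as an illustration, not part of the
# original script): the im2col-based gaussian_filter above should agree with a
# direct sliding-window weighted sum, up to float -> uint8 casting.
import numpy as np


def _reference_filter(image, k_size, sigma):
    kernel = gen_gaussian_kernel(k_size, sigma)
    out_h, out_w = image.shape[0] - k_size + 1, image.shape[1] - k_size + 1
    out = np.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            out[i, j] = (image[i : i + k_size, j : j + k_size] * kernel).sum()
    return out


if __name__ == "__main__":
    demo = np.random.randint(0, 256, size=(8, 8)).astype(np.float64)
    fast = gaussian_filter(demo, 3, sigma=1).astype(int)
    slow = _reference_filter(demo, 3, 1).astype(np.uint8).astype(int)
    assert np.abs(fast - slow).max() <= 1  # differences only from rounding at the cast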
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
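# Hedged usage sketch (not in the original module): the retrieval workers are
# Ray actors wrapping RayRetriever. The worker count and the checkpoint name
# below are illustrative assumptions.
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", actor_handles=workers
# )
# retriever.init_retrieval()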
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule() -> None:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os ,_PatchedModuleObj )
assert isinstance(_test_patching.os.path ,_PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path ,_PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os ,_PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path ,_PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path ,_PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin() -> None:
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing() -> None:
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin() -> None:
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop() -> None:
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive() -> None:
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist() -> None:
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
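# A minimal sketch (added, not how `datasets.utils.patching` is implemented)
# of the core idea the tests above exercise: temporarily swap an attribute on
# a module object and restore it afterwards.
import contextlib


@contextlib.contextmanager
def tiny_patch(module, attribute, replacement):
    original = getattr(module, attribute)
    setattr(module, attribute, replacement)
    try:
        yield
    finally:
        setattr(module, attribute, original)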
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
    _import_structure["modeling_maskformer_swin"] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
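# For contrast, a minimal PEP 562-style lazy loader (an added illustration of
# the same idea, not how transformers' _LazyModule is implemented): a
# module-level __getattr__ that imports submodules on first attribute access.
# def __getattr__(name):
#     import importlib
#     for submodule, exported_names in _import_structure.items():
#         if name in exported_names:
#             return getattr(importlib.import_module(f".{submodule}", __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")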
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
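# The heart of the value guidance above, isolated as a standalone sketch
# (added; assumption: value_fn maps a trajectory batch and a timestep batch to
# scalar values, like self.value_function(...).sample does in run_diffusion):
def guidance_step(x, t, value_fn, scale=0.1):
    import torch

    with torch.enable_grad():
        x = x.detach().requires_grad_()
        y = value_fn(x, t)
        grad = torch.autograd.grad(y.sum(), x)[0]
    # nudge the noisy sample towards higher predicted value
    return (x + scale * grad).detach()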
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
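# Quick sanity check (added illustration): with the defaults above, a 224x224
# image cut into 16x16 patches yields (224 // 16) ** 2 = 196 patch tokens.
if __name__ == "__main__":
    config = ViTMSNConfig()
    assert (config.image_size // config.patch_size) ** 2 == 196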
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """
    Log and save metrics

    Args:
    - split: one of train, val, test
    - metrics: metrics dict
    - output_dir: where to save the metrics
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
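# Typical invocation (the checkpoint, paths and flag selection below are
# illustrative assumptions, not taken from the original script's docs):
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#       --data_dir ./xsum \
#       --output_dir ./output \
#       --do_train --do_eval --predict_with_generate \
#       --task summarization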
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()


def f(y):
    return y**2 * y**4


print(differentiate(f, 9, 2))
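# Worked example (added): f(y) = y**2 * y**4 = y**6, so the second derivative
# is f''(y) = 30 * y**4, and at y = 9 the value printed above is 30 * 9**4.
assert differentiate(lambda y: y**2 * y**4, 9, 2) == 30 * 9**4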
"""simple docstring"""
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
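# Note (added): swap_nodes exchanges node *data* rather than relinking
# pointers -- an O(n) search followed by an O(1) swap. A quick check:
if __name__ == "__main__":
    demo = LinkedList()
    for value in (3, 2, 1):
        demo.push(value)  # the list is now 1 -> 2 -> 3
    demo.swap_nodes(1, 3)
    assert demo.head.data == 3  # 3 -> 2 -> 1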
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
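# Hedged usage sketch (added; not part of the module itself, shown with
# absolute imports): round-trip a tiny dataset through JSON lines.
# from datasets import Dataset
# from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
# ds = Dataset.from_dict({"a": [1, 2]})
# JsonDatasetWriter(ds, "tiny.jsonl").write()
# ds2 = JsonDatasetReader("tiny.jsonl").read()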
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
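# Quick usage sketch (added): a ring of three nodes, filled and drained FIFO.
if __name__ == "__main__":
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"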
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
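# Worked example (added): 145 is a factorion (1! + 4! + 5! = 145), so its
# chain is just [145] and has length exactly 1.
if __name__ == "__main__":
    assert digit_factorial_sum(145) == 145
    assert solution(chain_length=1, number_limit=146) >= 1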
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCAmelCase_ : Any = threading.Lock()
UpperCAmelCase_ : Optional[logging.Handler] = None
UpperCAmelCase_ : str = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCAmelCase_ : int = logging.WARNING
UpperCAmelCase_ : Optional[Any] = True
def _get_default_logging_level():
    """Return the default level, honoring the TRANSFORMERS_VERBOSITY env var if it is valid."""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    """Return the top-level package name, used as the library logger name."""
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """Return the mapping from verbosity-level names to `logging` levels."""
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the library root logger first."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """Return the current verbosity level of the library's root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library's root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)
def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)
def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)
def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    """Remove the given (attached) handler from the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Disable propagation of the library log outputs (the default)."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    """Enable propagation of the library log outputs to the root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    """Apply an explicit formatter to every handler of the library's root logger."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)
def reset_format() -> None:
    """Reset the formatter of every handler of the library's root logger."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Emit a warning unless the TRANSFORMERS_NO_ADVISORY_WARNINGS env var is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit the warning only once per unique call (cached via lru_cache)."""
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything, used when progress bars are disabled."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        """Return an empty function for any attribute access."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
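# Minimal usage sketch of the helpers above (names as defined in this module):
#
#   logger = get_logger(__name__)
#   set_verbosity_info()
#   logger.info("verbosity is now INFO")
#   logger.warning_once("this advisory is emitted only once")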
| 44
|
import os
import time
import numpy as np
import onnxruntime as ort
# TensorRT execution-provider settings (assumption: these are the ORT_TENSORRT_*
# environment variables set by the original quantization benchmark script).
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
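# The single warm-up run above lets the execution providers (e.g. TensorRT)
# build and cache their engines, so the timed loop below measures steady-state
# inference latency only.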
print('Start inference...')
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
| 106
| 0
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        """Forward the `audio` input to the feature extractor and the `text` input to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        """Pad `input_features` with the feature extractor and `labels` with the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self, *args, **kwargs):
        """Forward to Wav2Vec2CTCTokenizer.batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward to Wav2Vec2CTCTokenizer.decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """Temporarily set the tokenizer as the current processor for label encoding."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
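# Minimal usage sketch (assumes `raw_audio` is a 1-D float array sampled at 16 kHz):
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_audio, sampling_rate=16_000, return_tensors="pt")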
| 700
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value
    def enable_tiling(self, use_tiling: bool = True):
        """Enable tiled VAE encoding/decoding for very large images."""
        self.use_tiling = use_tiling
    def disable_tiling(self):
        """Disable tiled VAE encoding/decoding."""
        self.enable_tiling(False)
    def enable_slicing(self):
        """Enable sliced VAE decoding (one batch element at a time) to save memory."""
        self.use_slicing = True
    def disable_slicing(self):
        """Disable sliced VAE decoding."""
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}
        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )
        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        """Fall back to the default attention processor on all attention layers."""
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        """Linearly blend the bottom edge of tile `a` into the top edge of tile `b`."""
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h(self, a, b, blend_extent):
        """Linearly blend the right edge of tile `a` into the left edge of tile `b`."""
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
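# Minimal usage sketch (assumes `sample` is a float tensor of shape (B, 3, H, W)):
#
#   vae = AutoencoderKL()
#   posterior = vae.encode(sample).latent_dist
#   reconstruction = vae.decode(posterior.sample()).sample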
| 468
| 0
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` real images matching `class_prompt` from the LAION index."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 608
|
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 608
| 1
|
'''Atbash cipher: mirror each letter of the alphabet (A<->Z, B<->Y, ...).'''
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
def benchmark() -> None:
    """Benchmark both implementations with timeit."""
    from timeit import timeit
    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
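# Example: atbash("Hello") == "Svool" (H<->S, e<->v, l<->o, o<->l).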
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'{example} encrypted in atbash: {atbash(example)}')
benchmark()
| 708
|
'''`transformers-cli train`: train a model on a text classification task.'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """Factory used by the argparse `func` default to instantiate the command."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    """CLI command that trains a pipeline on a CSV dataset."""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command as the `train` subcommand of the CLI parser."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch(self):
        raise NotImplementedError
    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 0
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer, based on SentencePiece."""
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        """Map each token back to its (start, end) character offsets in the original text."""
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        """Perform invalid-character replacement and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string with SentencePiece, optionally with subword sampling."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        """Return True if `char` is a CJK unified ideograph."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha(self, char):
        """Return True if `char` is an ASCII letter."""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct(self, char):
        """Return True if `char` is a (Western or CJK) punctuation mark."""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace(self, char):
        """Return True if `char` is a whitespace character."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
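# Minimal usage sketch (assumes the SentencePiece checkpoint and vocab exist locally):
#
#   tokenizer = ErnieMTokenizer(
#       sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt"
#   )
#   input_ids = tokenizer("Hello world")["input_ids"]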
| 289
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = Blip2Config(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights to the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)
    hf_model = Blip2ForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    lavis_model, model_type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_model, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 289
| 1
|
import os
import string
import sys
# ARROW_KEY_FLAG shifts arrow-key codes above the 8-bit character range so they
# cannot collide with ordinary key codes.
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from stdin without waiting for a newline."""
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Get a character from stdin, translating escape sequences into key codes."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
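# A minimal usage sketch (not part of the original module): a tiny echo loop
# built on get_character(). It assumes an interactive TTY and exits on Ctrl+C
# ("interrupt") or Enter ("newline").
def demo_key_echo() -> None:
    arrows = {KEYMAP["up"]: "up", KEYMAP["down"]: "down", KEYMAP["right"]: "right", KEYMAP["left"]: "left"}
    while True:
        key = get_character()
        if key in (chr(KEYMAP["interrupt"]), chr(KEYMAP["newline"])):
            break
        if isinstance(key, str) and ord(key) in arrows:
            print("arrow:", arrows[ord(key)])
        else:
            print("key:", repr(key))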
| 711
|
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Determine in O(n) whether some rearrangement of input_str forms a palindrome."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Determine whether some rearrangement of input_str forms a palindrome."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    # At most one character may appear an odd number of times.
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations above with timeit."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()", "\tans =", can_string_be_rearranged_as_palindrome_counter(input_str), "\ttime =", timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z", ), "seconds", )
    print(
        "> can_string_be_rearranged_as_palindrome()", "\tans =", can_string_be_rearranged_as_palindrome(input_str), "\ttime =", timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z", ), "seconds", )
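def _demo() -> None:
    # A worked sketch (not in the original file): a palindrome rearrangement
    # exists iff at most one character has an odd frequency.
    assert can_string_be_rearranged_as_palindrome_counter("Momo")        # m:2, o:2
    assert not can_string_be_rearranged_as_palindrome_counter("Mother")  # six odd counts
    assert can_string_be_rearranged_as_palindrome("nurses run")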
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 451
| 0
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
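# A minimal usage sketch (TPU name and zone below are assumptions). With
# --debug the assembled gcloud command is printed instead of executed:
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --debug
#
# prints roughly:
#
#   Running gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command cd /usr/share; echo hello --worker all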
| 557
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
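# What the _LazyModule indirection buys (an illustrative sketch): importing the
# package is cheap, and heavy submodules load only on first attribute access.
#
#   from transformers.models.luke import LukeConfig   # config only, no torch import
#   from transformers.models.luke import LukeModel    # now modeling_luke (and torch) load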
| 557
| 1
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF model and copy `value` into the matching parameter."""
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    """Return True if `name` matches any of the (possibly wildcarded) ignore keys."""
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    # NOTE: the original condition `model_name == "encodec_24khz" or "encodec_32khz"`
    # was always truthy; a membership test is the intended check.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS ):
            logger.info(f'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('.' )[-2]
                    mapped_key = mapped_key.replace('*', layer_index )
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
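# An illustrative invocation (local paths are assumptions); the checkpoint URLs
# listed near the top of this file supply the `--checkpoint_path` input:
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz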
| 707
|
import argparse
import struct
import unittest
class SHA256:
    """Class to contain the entire pipeline for the SHA-256 hashing algorithm."""
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the data to a multiple of 64 bytes, appending the bit length big-endian."""
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q', (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        """Run the 64-round compression function over each 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data ), 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L', block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7 )
                        ^ self.ror(words[index - 15], 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17 )
                        ^ self.ror(words[index - 2], 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                S1 = self.ror(e, 6 ) ^ self.ror(e, 11 ) ^ self.ror(e, 25 )
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                S0 = self.ror(a, 2 ) ^ self.ror(a, 13 ) ^ self.ror(a, 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x100000000
                a, b, c, d, e, f, g, h = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class."""
    def test_match_hashes(self) -> None:
        import hashlib
        msg = bytes('Test String', 'utf-8' )
        self.assertEqual(SHA256(msg ).hash, hashlib.sha256(msg ).hexdigest() )
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument(
        '-f', '--file', dest='input_file', help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8' )
    print(SHA256(hash_input ).hash )
if __name__ == "__main__":
    main()
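# A standalone sketch of the 32-bit right-rotation used in the schedule and
# compression rounds above, with two hand-checked values.
def _ror32(value: int, rotations: int) -> int:
    # Keep only the low 32 bits of the left part, then OR in the shifted-out bits.
    return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
# _ror32(0x00000001, 1) == 0x80000000
# _ror32(0x0000000B, 4) == 0xB0000000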
| 52
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('''<command> [<args>] ''', '''''' )
        return usage
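# A sketch of how these helpers compose inside `accelerate config` (the prompt
# text below is illustrative, not taken from this file):
#
#   use_cpu = _ask_field(
#       "Do you want to run your training on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )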
| 590
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
| 590
| 1
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1024,
'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file):
    """Read a vocabulary file into a list of stripped tokens."""
    with open(vocab_file, """r""" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs, ):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token )
    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token ) )
    def _tokenize(self, text, **kwargs):
        return text.split()
    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token )
    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token ) )
    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token )
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
        with open(vocab_file, """w""" ) as f:
            f.write("""\n""".join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens )
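# A minimal sketch with a toy vocabulary (the file contents below are an
# assumption, not the real ESM-2 vocab): tokenization splits on whitespace and
# sequences are wrapped as <cls> ... <eos>.
#
#   with open("toy_vocab.txt", "w") as f:
#       f.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V"]))
#   tok = EsmTokenizer(vocab_file="toy_vocab.txt")
#   tok("L A G V")["input_ids"]   # -> [0, 4, 5, 6, 7, 2]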
| 717
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    # NOTE: `from_gh` is a module-level flag set in the __main__ block below.
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes ):
                line = line.decode("""UTF-8""" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(""" """ ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = """\n""".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(F': {x}: ' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path, filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p ) for p in os.listdir(artifact_dir ) if (p.endswith(""".zip""" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        """Split a comma-separated string into a list."""
        return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
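# An illustrative invocation (the run id and token below are placeholders, not
# taken from this file):
#
#   python extract_warnings.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ./artifacts \
#       --token <token-with-actions:read> \
#       --targets DeprecationWarning,UserWarning
#
# The collected warnings end up in ./artifacts/selected_warnings.json.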
| 53
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    """A tool that translates text between languages with an NLLB-200 checkpoint."""
    default_checkpoint = """facebook/nllb-200-distilled-600M"""
    description = (
        """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
        """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
        """which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in """
        """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
    )
    name = """translator"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["""text""", """text""", """text"""]
    outputs = ["""text"""]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='''pt''', src_lang=src_lang, tgt_lang=tgt_lang )
    def forward(self, inputs):
        return self.model.generate(**inputs )
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True )
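# A minimal usage sketch of the Tool API (the checkpoint downloads on first
# call; the sentence below is illustrative):
#
#   translator = TranslationTool()
#   translator("C'est une belle journée.", src_lang="French", tgt_lang="English")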
| 100
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex] )
    @property
    def size(self):
        return self._size
    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex, weight ) )
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
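def _demo_zero_one_bfs() -> int:
    # A worked sketch (not in the original file): 0-1 BFS on a 4-vertex graph.
    # Zero-weight edges go to the front of the deque, which keeps it "sorted
    # enough" to behave like Dijkstra for {0, 1} weights.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 3, 1)
    g.add_edge(3, 2, 0)
    return g.get_shortest_path(0, 2)  # -> 1 (via either route)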
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702
|
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the list of (old, new) parameter-name pairs for the timm -> HF rename."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
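def _demo_qkv_split():
    # Added sketch (not part of the original script): timm fuses q, k and v into
    # one (3 * hidden_size, hidden_size) projection; read_in_q_k_v above slices
    # it apart. hidden_size here is a hypothetical toy value.
    hidden_size = 4
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value]), in_proj_weight)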
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # We verify our conversion on an image of cute cats.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT/DeiT checkpoint into the HF ViT structure."""
    config = ViTConfig()
    base_model = False

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])

    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
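# Added follow-up (sketch): the converted checkpoint loads back with the plain
# from_pretrained API; the folder below is hypothetical (whatever was passed
# as --pytorch_dump_folder_path).
#
#     from transformers import ViTForImageClassification, ViTImageProcessor
#
#     processor = ViTImageProcessor.from_pretrained("./vit_base_patch16_224")
#     model = ViTForImageClassification.from_pretrained("./vit_base_patch16_224")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[int(logits.argmax(-1))])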
| 604
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class _snake_case ( datasets.BuilderConfig ):
lowerCAmelCase_ : int = 1_0000
lowerCAmelCase_ : Optional[List[str]] = None
lowerCAmelCase_ : Optional[datasets.Features] = None
class _snake_case ( datasets.ArrowBasedBuilder ):
lowerCAmelCase_ : Tuple = ParquetConfig
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
snake_case_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
snake_case_ = data_files
if isinstance(a__ , a__ ):
snake_case_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
snake_case_ = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
snake_case_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
snake_case_ = [dl_manager.iter_files(a__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , "rb" ) as f:
snake_case_ = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"files": files} ) )
return splits
def lowerCAmelCase__ ( self , a__ ) -> pa.Table:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case_ = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def lowerCAmelCase__ ( self , a__ ) -> List[Any]:
'''simple docstring'''
snake_case_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , "rb" ) as f:
snake_case_ = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
snake_case_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'{file_idx}_{batch_idx}', self._cast_table(a__ )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(a__ )}: {e}' )
raise
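# Added usage sketch (not part of the builder): `load_dataset("parquet", ...)`
# dispatches to the builder above. File names below are hypothetical.
if __name__ == "__main__":
    import pandas as pd

    from datasets import load_dataset

    pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_parquet("train.parquet")
    ds = load_dataset("parquet", data_files={"train": "train.parquet"})
    print(ds["train"].features)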
| 400
| 0
|
"""simple docstring"""
from pathlib import Path
import fire
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->int:
"""simple docstring"""
__UpperCAmelCase : Dict = Path(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = Path(UpperCAmelCase_ )
dest_dir.mkdir(exist_ok=UpperCAmelCase_ )
for path in src_dir.iterdir():
__UpperCAmelCase : List[str] = [x.rstrip() for x in list(path.open().readlines() )][:n]
__UpperCAmelCase : Optional[Any] = dest_dir.joinpath(path.name )
print(UpperCAmelCase_ )
dest_path.open('''w''' ).write('''\n'''.join(UpperCAmelCase_ ) )
if __name__ == "__main__":
fire.Fire(minify)
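# Added usage note (sketch): fire exposes minify as a CLI, e.g.
#
#     python minify.py data/ data_mini/ 5
#
# or, programmatically with hypothetical paths:
#
#     minify("data", "data_mini", 5)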
| 374
|
"""simple docstring"""
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->Tuple:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_=0 ) ->Dict:
"""simple docstring"""
return sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : x[column] )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=float('''inf''' ) ) ->str:
"""simple docstring"""
for i in range(points_counts - 1 ):
for j in range(i + 1 , UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCAmelCase : Tuple = current_dis
return min_dis
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=float('''inf''' ) ) ->str:
"""simple docstring"""
for i in range(min(6 , points_counts - 1 ) , UpperCAmelCase_ ):
for j in range(max(0 , i - 6 ) , UpperCAmelCase_ ):
__UpperCAmelCase : Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCAmelCase : Tuple = current_dis
return min_dis
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Any:
"""simple docstring"""
if points_counts <= 3:
return dis_between_closest_pair(UpperCAmelCase_ , UpperCAmelCase_ )
# recursion
__UpperCAmelCase : Any = points_counts // 2
__UpperCAmelCase : Any = closest_pair_of_points_sqr(
UpperCAmelCase_ , points_sorted_on_y[:mid] , UpperCAmelCase_ )
__UpperCAmelCase : Tuple = closest_pair_of_points_sqr(
UpperCAmelCase_ , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCAmelCase : List[Any] = min(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : int = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = dis_between_closest_in_strip(
UpperCAmelCase_ , len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return min(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->List[Any]:
"""simple docstring"""
__UpperCAmelCase : str = column_based_sort(UpperCAmelCase_ , column=0 )
__UpperCAmelCase : Any = column_based_sort(UpperCAmelCase_ , column=1 )
return (
closest_pair_of_points_sqr(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
) ** 0.5
if __name__ == "__main__":
lowercase__ :Optional[Any] = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
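# Added cross-check (sketch): compare against an O(n^2) brute force.
if __name__ == "__main__":
    from itertools import combinations

    brute = min(euclidean_distance_sqr(p, q) for p, q in combinations(points, 2)) ** 0.5
    assert abs(closest_pair_of_points(points, len(points)) - brute) < 1e-9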
| 374
| 1
|
"""simple docstring"""
def __a ( A ) -> Any:
'''simple docstring'''
A__ = []
A__ = set({"(", "[", "{"} )
A__ = set({")", "]", "}"} )
A__ = {"{": "}", "[": "]", "(": ")"}
for i in range(len(A ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(A ) == 0 or (len(A ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(A ) == 0
def __a ( ) -> Optional[int]:
'''simple docstring'''
A__ = input("Enter sequence of brackets: " )
if is_balanced(A ):
print(A , "is balanced" )
else:
print(A , "is not balanced" )
if __name__ == "__main__":
main()
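def _demo_is_balanced():
    # Added sketch: a few expected outcomes of is_balanced.
    assert is_balanced("([]{})") is True  # properly nested
    assert is_balanced("([)]") is False  # interleaved pairs
    assert is_balanced("(") is False  # unclosed bracket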
| 337
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase ={
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 337
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[] ):
"""simple docstring"""
lowercase__ = size[0] - overlap_pixels * 2
lowercase__ = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
lowercase__ = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
lowercase__ = np.pad(SCREAMING_SNAKE_CASE , mode='''linear_ramp''' , pad_width=SCREAMING_SNAKE_CASE , end_values=0 )
if "l" in remove_borders:
lowercase__ = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
lowercase__ = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
lowercase__ = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
lowercase__ = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return max(SCREAMING_SNAKE_CASE , min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = list(SCREAMING_SNAKE_CASE )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
lowercase__ = clamp_rect(SCREAMING_SNAKE_CASE , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE , (original_slice, 0) )
return result
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
lowercase__ = tile.crop(SCREAMING_SNAKE_CASE )
return tile
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
    divisor = n % d
    return n - divisor
class _a ( UpperCamelCase__ ):
def __init__( self: List[Any] , UpperCamelCase_: AutoencoderKL , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase_: int = 350 , ) -> str:
"""simple docstring"""
super().__init__(
vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , max_noise_level=UpperCamelCase_ , )
def lowerCamelCase_ ( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: Any , **UpperCamelCase_: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
lowercase__ = add_overlap_rect(UpperCamelCase_ , UpperCamelCase_ , image.size )
lowercase__ = image.crop(UpperCamelCase_ )
lowercase__ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
lowercase__ = translated_slice_x - (original_image_slice / 2)
lowercase__ = max(0 , UpperCamelCase_ )
lowercase__ = squeeze_tile(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = to_input.size
lowercase__ = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
lowercase__ = super(UpperCamelCase_ , self ).__call__(image=UpperCamelCase_ , **UpperCamelCase_ ).images[0]
lowercase__ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
lowercase__ = unsqueeze_tile(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
lowercase__ = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
lowercase__ = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCamelCase_ ) , mode='''L''' , )
final_image.paste(
UpperCamelCase_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCamelCase_ )
@torch.no_grad()
def __call__( self: Optional[int] , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 75 , UpperCamelCase_: float = 9.0 , UpperCamelCase_: int = 50 , UpperCamelCase_: Optional[Union[str, List[str]]] = None , UpperCamelCase_: Optional[int] = 1 , UpperCamelCase_: float = 0.0 , UpperCamelCase_: Optional[torch.Generator] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 128 , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 32 , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
lowercase__ = math.ceil(image.size[0] / tile_size )
lowercase__ = math.ceil(image.size[1] / tile_size )
lowercase__ = tcx * tcy
lowercase__ = 0
for y in range(UpperCamelCase_ ):
for x in range(UpperCamelCase_ ):
self._process_tile(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , prompt=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , noise_level=UpperCamelCase_ , negative_prompt=UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ , eta=UpperCamelCase_ , generator=UpperCamelCase_ , latents=UpperCamelCase_ , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def _a ( ):
"""simple docstring"""
lowercase__ = '''stabilityai/stable-diffusion-x4-upscaler'''
lowercase__ = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE , revision='''fp16''' , torch_dtype=torch.floataa )
lowercase__ = pipe.to('''cuda''' )
lowercase__ = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(SCREAMING_SNAKE_CASE ):
print(f'progress: {obj["progress"]:.4f}' )
obj["image"].save('''diffusers_library_progress.jpg''' )
lowercase__ = pipe(image=SCREAMING_SNAKE_CASE , prompt='''Black font, white background, vector''' , noise_level=40 , callback=SCREAMING_SNAKE_CASE )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
| 429
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 16 ):
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
lowercase__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
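def _demo_dynamic_padding():
    # Added sketch (not in the original example): the same `pad_to_multiple_of`
    # trick the collate_fn above applies under mixed precision, so padded
    # sequence lengths line up with tensor-core-friendly multiples of 8.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    batch = tokenizer.pad(
        [tokenizer("short"), tokenizer("a somewhat longer example sentence")],
        padding="longest",
        pad_to_multiple_of=8,
        return_tensors="pt",
    )
    print(batch["input_ids"].shape)  # second dimension is a multiple of 8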
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
# Initialize accelerator
lowercase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config['''lr''']
lowercase__ = int(config['''num_epochs'''] )
lowercase__ = int(config['''seed'''] )
lowercase__ = int(config['''batch_size'''] )
lowercase__ = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
lowercase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase__ = batch_size // MAX_GPU_BATCH_SIZE
lowercase__ = MAX_GPU_BATCH_SIZE
set_seed(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ = model(**SCREAMING_SNAKE_CASE )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
lowercase__ = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(SCREAMING_SNAKE_CASE ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , SCREAMING_SNAKE_CASE )
def _a ( ):
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ = parser.parse_args()
lowercase__ = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 429
| 1
|
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(SCREAMING_SNAKE_CASE__ ):
_lowerCAmelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_lowerCAmelCase = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(SCREAMING_SNAKE_CASE__ ):
_lowerCAmelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_lowerCAmelCase = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_lowerCAmelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
_lowerCAmelCase = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
_lowerCAmelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase_ : Union[str, Any] ):
return model(**SCREAMING_SNAKE_CASE__ )
eval(**SCREAMING_SNAKE_CASE__ ).block_until_ready()
@slow
def __lowerCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
_lowerCAmelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
_lowerCAmelCase = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
_lowerCAmelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase_ : Dict ):
return model(**SCREAMING_SNAKE_CASE__ )
eval(**SCREAMING_SNAKE_CASE__ ).block_until_ready()
def __lowerCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , 'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCAmelCase = FlaxAutoModel.from_pretrained('bert-base' )
def __lowerCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCAmelCase = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ , revision='aaaaaa' )
def __lowerCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
_lowerCAmelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def __lowerCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'Use `from_pt=True` to load this model' ):
_lowerCAmelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 580
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase = logging.get_logger(__name__)
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["pixel_values"]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : float = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = size if size is not None else {"shortest_edge": 384}
lowerCAmelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
# Default value set here for backwards compatibility where the value in config is None
lowerCAmelCase__ = crop_pct if crop_pct is not None else 224 / 256
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : int , ) -> np.ndarray:
lowerCAmelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
if "shortest_edge" not in size:
raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
lowerCAmelCase__ = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCAmelCase__ = int(shortest_edge / crop_pct )
lowerCAmelCase__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=SCREAMING_SNAKE_CASE__ , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
SCREAMING_SNAKE_CASE__ , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : int , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[int, float] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> List[Any]:
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : float = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : float = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Dict , ) -> PIL.Image.Image:
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , crop_pct=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
lowerCAmelCase__ = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
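# Added usage sketch: the defaults above (shortest edge 384, crop_pct 224/256)
# match the ConvNeXT image processor in transformers; assuming that export name:
if __name__ == "__main__":
    import numpy as np

    from transformers import ConvNextImageProcessor

    processor = ConvNextImageProcessor(size={"shortest_edge": 384})
    image = np.random.randint(0, 256, (500, 400, 3), dtype=np.uint8)
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384): warped, not cropped, at 384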
| 61
| 0
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer built on keras-nlp's BytePairTokenizer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
_lowerCamelCase = doctest.register_optionflag("""IGNORE_RESULT""")
_lowerCamelCase = doctest.OutputChecker
class _snake_case (__SCREAMING_SNAKE_CASE):
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,_snake_case ,_snake_case ,_snake_case )
_lowerCamelCase = CustomOutputChecker
_lowerCamelCase = HfDoctestModule
_lowerCamelCase = HfDocTestParser
| 323
| 1
|
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = {str(digit): digit**5 for digit in range(10)}
def lowerCamelCase__ ( _lowerCamelCase : int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_lowerCamelCase ) )
def lowerCamelCase__ ( ) -> int:
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(_lowerCamelCase ) )
if __name__ == "__main__":
print(solution())
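# Added spot checks (sketch): two known fixed points of the digit-fifth-power
# map, and one non-member.
if __name__ == "__main__":
    assert digits_fifth_powers_sum(4150) == 4150
    assert digits_fifth_powers_sum(54748) == 54748
    assert digits_fifth_powers_sum(10) == 1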
| 549
|
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : list[list[int | float]] ) -> int:
lowerCamelCase_ = len(_lowerCamelCase )
lowerCamelCase_ = len(matrix[0] )
lowerCamelCase_ = min(_lowerCamelCase , _lowerCamelCase )
for row in range(_lowerCamelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _lowerCamelCase ):
lowerCamelCase_ = matrix[col][row] / matrix[row][row]
for i in range(_lowerCamelCase , _lowerCamelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
lowerCamelCase_ = True
for i in range(row + 1 , _lowerCamelCase ):
if matrix[i][row] != 0:
lowerCamelCase_ , lowerCamelCase_ = matrix[i], matrix[row]
lowerCamelCase_ = False
break
if reduce:
rank -= 1
for i in range(_lowerCamelCase ):
lowerCamelCase_ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
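# Added checks (sketch): rank_of_matrix mutates its argument, so pass
# throwaway literals.
if __name__ == "__main__":
    assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1  # second row is 2x the first
    assert rank_of_matrix([[1.0, 0.0], [0.0, 1.0]]) == 2  # identity has full rank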
| 549
| 1
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    # Rows are sorted, so the bound can only shrink as we move down the grid.
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
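    # Added sanity check (sketch): all three counters agree on a small grid.
    sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert count_negatives_binary_search(sample) == 8
    assert count_negatives_brute_force(sample) == 8
    assert count_negatives_brute_force_with_break(sample) == 8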
| 712
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : int = """▁"""
UpperCamelCase__ : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
UpperCamelCase__ : Dict = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
UpperCamelCase__ : Optional[int] = {"""vinai/bartpho-syllable""": 10_24}
class _UpperCamelCase ( A_ ):
'''simple docstring'''
lowerCamelCase : str = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self : List[str] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[str]="<s>" , __lowercase : str="</s>" , __lowercase : Optional[Any]="</s>" , __lowercase : Any="<s>" , __lowercase : Dict="<unk>" , __lowercase : int="<pad>" , __lowercase : str="<mask>" , __lowercase : Optional[Dict[str, Any]] = None , **__lowercase : Optional[Any] , ):
'''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
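# Note on the layout built by build_inputs_with_special_tokens above: like
# RoBERTa/BART, a single sequence is encoded as <s> A </s> and a pair of
# sequences as <s> A </s></s> B </s>.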
| 486
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
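# Example invocation (the script file name and paths below are placeholders,
# shown only to illustrate the expected arguments):
#   python convert_openai_checkpoint.py \
#       --openai_checkpoint_folder_path ./openai-gpt-checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch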
| 42
|
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can expand one placeholder token into several vector tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token} - keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
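# Minimal usage sketch (the checkpoint name is illustrative; any CLIP tokenizer works):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer.encode("a photo of <cat-toy>", vector_shuffle=True)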
| 681
| 0
|
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Returns number + 2 if number and number + 2 are both prime, and -1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
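# Examples: twin_prime(5) == 7 (5 and 7 are both prime), twin_prime(4) == -1,
# and twin_prime(6.0) raises TypeError because the input must be an integer.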
| 545
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 545
| 1
|
def kth_permutation(k: int, n: int) -> list:
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n - 1 in O(n^2) time, using the factorial number system.
    """
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
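# Worked example: kth_permutation(1, 3) pops factorial 2 first (divmod(1, 2) -> 0, 1),
# then factorial 1 (divmod(1, 1) -> 1, 0), yielding the permutation [0, 2, 1].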
| 40
|
"""simple docstring"""
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Converts a Roman numeral to an integer, e.g. "XIV" -> 14.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Converts an integer to a Roman numeral, e.g. 14 -> "XIV".
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
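# Round-trip sanity check: roman_to_int("MMXXIV") == 2024 and
# int_to_roman(2024) == "MMXXIV".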
| 567
| 0
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
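# Usage sketch (the checkpoint name is illustrative):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   inputs = processor(images=image, text="How many cats are there?", return_tensors="pt")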
| 707
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCAmelCase = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 259
| 0
|
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test: returns True iff the Mersenne number
    2**p - 1 is prime (p itself must be at least 2).
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
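# lucas_lehmer_test(7) prints True because 2**7 - 1 = 127 is a Mersenne prime,
# while lucas_lehmer_test(11) prints False since 2**11 - 1 = 2047 = 23 * 89.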
| 201
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
    unet(32)
# unet(128)
value_function()
| 201
| 1
|
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """
    Probability density of the normal distribution with mean mu and
    standard deviation sigma, evaluated at x.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
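# Sanity check: gaussian(0.0) == 1 / sqrt(2 * pi) ≈ 0.3989422804014327 for the
# standard normal distribution (mu=0, sigma=1).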
| 716
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """
    Returns the index of the first term in the Fibonacci sequence to contain
    n digits.
    """
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
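# Example: solution(3) == 12, since F(12) = 144 is the first Fibonacci number
# with three digits.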
| 349
| 0
|
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using union by rank; returns True if the
        merge happened, False if the two elements were already in one set.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
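# Example: DisjointSet([1, 1, 1]) starts with three singleton sets; after
# merge(0, 1) and merge(1, 2) all elements share one root and max_set == 3.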
| 326
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size)
            )
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 81
| 0
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
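# Example invocation (the script file name and paths are placeholders):
#   python convert_rembert_checkpoint.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert-pytorch/pytorch_model.bin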
| 707
|
"""simple docstring"""
def bfs(graph, s, t, parent):
    """Breadth-first search over the residual graph; records the augmenting
    path in `parent` and returns True if the sink is reachable."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant of the Ford-Fulkerson maximum-flow algorithm."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the chosen path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
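# For the sample graph above (the classic CLRS max-flow example) the maximum
# flow from node 0 to node 5 is 23.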
| 659
| 0
|
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = "mask2former"
__lowerCamelCase : Optional[Any] = ["swin"]
__lowerCamelCase : str = {"hidden_size": "hidden_dim"}
def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = 256 , _lowerCAmelCase = 256 , _lowerCAmelCase = 256 , _lowerCAmelCase = 1024 , _lowerCAmelCase = "relu" , _lowerCAmelCase = 6 , _lowerCAmelCase = 10 , _lowerCAmelCase = 8 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 2048 , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = 4 , _lowerCAmelCase = 255 , _lowerCAmelCase = 100 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 2.0 , _lowerCAmelCase = 5.0 , _lowerCAmelCase = 5.0 , _lowerCAmelCase = 12544 , _lowerCAmelCase = 3.0 , _lowerCAmelCase = 0.75 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1.0 , _lowerCAmelCase = True , _lowerCAmelCase = [4, 8, 16, 32] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> List[Any]:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
_lowerCAmelCase = CONFIG_MAPPING["swin"](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_lowerCAmelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = backbone_config.pop("model_type" )
_lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase = config_class.from_dict(_lowerCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
_lowerCAmelCase = backbone_config
_lowerCAmelCase = feature_size
_lowerCAmelCase = mask_feature_size
_lowerCAmelCase = hidden_dim
_lowerCAmelCase = encoder_feedforward_dim
_lowerCAmelCase = activation_function
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = dim_feedforward
_lowerCAmelCase = pre_norm
_lowerCAmelCase = enforce_input_projection
_lowerCAmelCase = common_stride
_lowerCAmelCase = ignore_value
_lowerCAmelCase = num_queries
_lowerCAmelCase = no_object_weight
_lowerCAmelCase = class_weight
_lowerCAmelCase = mask_weight
_lowerCAmelCase = dice_weight
_lowerCAmelCase = train_num_points
_lowerCAmelCase = oversample_ratio
_lowerCAmelCase = importance_sample_ratio
_lowerCAmelCase = init_std
_lowerCAmelCase = init_xavier_std
_lowerCAmelCase = use_auxiliary_loss
_lowerCAmelCase = feature_strides
_lowerCAmelCase = output_auxiliary_logits
_lowerCAmelCase = decoder_layers
super().__init__(**_lowerCAmelCase )
@classmethod
def _snake_case ( cls , _lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
return cls(
backbone_config=_lowerCAmelCase , **_lowerCAmelCase , )
def _snake_case ( self ) -> Dict[str, any]:
_lowerCAmelCase = copy.deepcopy(self.__dict__ )
_lowerCAmelCase = self.backbone_config.to_dict()
_lowerCAmelCase = self.__class__.model_type
return output
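# A minimal usage sketch, assuming an installed `transformers` package that
# exposes this class; the default constructor builds a Swin backbone config.
from transformers import Mask2FormerConfig

config = Mask2FormerConfig()
assert config.decoder_layers == 10
assert config.to_dict()["model_type"] == "mask2former"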
| 18
|
"""simple docstring"""
def UpperCAmelCase ( A : list[int] , A : list[int] ):
'''simple docstring'''
if not len(A ) == len(A ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
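# A small worked example: x + 2y = 7 and 3x - y = 7 have the unique solution
# (3.0, 2.0), which the function above recovers via Cramer's rule.
assert cramers_rule_2x2([1, 2, 7], [3, -1, 7]) == (3.0, 2.0)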
| 573
| 0
|
"""RoFormer model configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
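# A brief usage sketch, assuming an installed `transformers` package that
# exposes this class; with embedding_size left as None it falls back to
# hidden_size.
from transformers import RoFormerConfig

config = RoFormerConfig()
assert config.embedding_size == config.hidden_size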
| 43
|
"""Tests for the XLM tokenizer."""

import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
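# A hypothetical invocation sketch for running just these tests with pytest;
# the file path assumes a transformers source checkout and may differ by version.
#   python -m pytest tests/models/xlm/test_tokenization_xlm.py -k "full_tokenizer"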
| 43
| 1
|