import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Build HF encoder/decoder configs from the original Donut model's config."""
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    """Map an original Donut state-dict key to its HF equivalent."""
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                # split the fused qkv projection into separate query/key/value tensors
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensor = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
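# A hedged usage sketch for the converter above. The file name and the output
# directory are assumptions for illustration; only --model_name values handled
# in convert_donut_checkpoint() are supported, and the original `donut-python`
# package must be installed:
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base \
#       --pytorch_dump_folder_path ./donut-base-converted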
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series, 1, 1/2, ..., 1/n, as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
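# A minimal sanity check of harmonic_series above (expected values follow
# directly from the loop: the first element is "1", later ones "1/k"):
assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]
assert harmonic_series("") == []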
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
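# Illustrative stand-alone use of the processor exercised above (parameter
# values mirror the tester defaults; this snippet is an assumption, not part
# of the original test file):
#
#   processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_pct=0.9)
#   pixel_values = processor(image, return_tensors="pt").pixel_values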
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight order,
    splitting the last item if it does not fit whole."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
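# Worked example for fractional_knapsack (hypothetical inputs, not from the
# original file): the ratios are 6, 5 and 4, so items 0 and 1 are taken whole
# and 20/30 of item 2 fills the remaining capacity, for a total value of 240.
max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert max_value == 240.0
assert fractions == [1, 1, 2 / 3]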
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a circular array."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
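# A short usage sketch for CircularQueue (illustration only; enqueue() returns
# self, which is what makes the chained calls below work):
q = CircularQueue(3)
q.enqueue("a").enqueue("b")
assert len(q) == 2 and q.first() == "a"
assert q.dequeue() == "a"
assert q.dequeue() == "b"
assert q.is_empty()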
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RetriBert tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
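# Hedged usage sketch (assumes the yjernite/retribert-base-uncased checkpoint
# referenced above is still downloadable from the Hub):
#
#   tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   encoding = tokenizer("how do download managers work?")
#   encoding["input_ids"]  # [CLS] ... [SEP] ids, per build_inputs_with_special_tokens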
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])

    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f"  {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )

    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
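# For reference, a command of the shape run_trainer() builds in its distributed
# branch (the GPU count and port below are hypothetical placeholders):
#
#   python -m torch.distributed.run --nproc_per_node=2 --master_port=29500 \
#       <examples_dir>/pytorch/translation/run_translation.py --model_name_or_path ...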
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep update: fall back to lower orders while warming up
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
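# The multistep branch in step() uses the classical Adams-Bashforth weights
# (55/24, -59/24, 37/24, -9/24 at order 4, with lower-order variants during
# warm-up). As a quick consistency check, each weight set sums to 1:
assert (3 - 1) / 2 == 1.0
assert (23 - 16 + 5) / 12 == 1.0
assert (55 - 59 + 37 - 9) / 24 == 1.0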
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
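# The fixtures used above (xz_file, text_file, tar_jsonl_path, ...) come from
# the datasets test suite's conftest; a minimal stand-in for text_file, as an
# assumption for running this module on its own:
#
#   @pytest.fixture
#   def text_file(tmp_path):
#       path = tmp_path / "file.txt"
#       path.write_text("foo\nbar")
#       return path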
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
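# For context on test_gelu_versions: transformers' gelu_python computes the
# erf-based form x * 0.5 * (1.0 + erf(x / sqrt(2))), while gelu_new is the
# tanh approximation, which is why allclose() holds against the builtin but
# not between the two (explanatory note, not part of the original test file).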
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
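# Typical invocations once this is wired up as the `accelerate` console entry
# point (subcommand names follow the parsers registered above):
#
#   accelerate config                      # interactive configuration
#   accelerate env                         # report the current environment
#   accelerate launch train.py --arg ...   # run a script with the saved config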
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # note: the tester pins these values rather than using the arguments above
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
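# Example (sketch, assuming torch + transformers are installed): consumers import the
# guarded pipelines through the public `diffusers` namespace rather than this module.
#
#     from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#     )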
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count string for a paper looked up on Google Scholar."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
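    # NOTE: this scraper is brittle by nature: it assumes Google Scholar's current
    # markup (the "gs_ri" and "gs_fl" CSS classes) and that the citation count is
    # the third anchor in that block; any layout change will break it.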
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
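# The shim above keeps old imports working: instantiating MobileViTFeatureExtractor
# emits a FutureWarning but otherwise behaves exactly like MobileViTImageProcessor.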
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
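# Example (sketch, hypothetical names): wiring the trainer into a QA script, where
# `post_processing_function` is assumed to map start/end logits back to answer spans.
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         tokenizer=tokenizer,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()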
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
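# Example (sketch): generating ONNX dummy inputs from a default-sized config.
# WhisperProcessor is assumed to bundle the feature extractor and tokenizer the
# method expects as `preprocessor`.
#
#     from transformers import WhisperConfig, WhisperProcessor
#     from transformers.utils import TensorType
#
#     onnx_config = WhisperOnnxConfig(WhisperConfig())
#     processor = WhisperProcessor.from_pretrained("openai/whisper-base")
#     dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)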
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed sequence length so candidate encodings can be stacked into a batch.
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair', None)
        return_tensors = kwargs.pop('return_tensors', None)

        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get('input_ids')
            encoded_attention_mask = encoded_candidates.get('attention_mask')
            encoded_token_type_ids = encoded_candidates.get('token_type_ids')

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
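# Example (sketch): encoding a batch of retrieval candidates at a fixed length so
# they can be stacked into a (batch, num_candidates, seq_len) tensor.
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     batch = tokenizer.batch_encode_candidates(
#         [["the first candidate", "the second candidate"]], max_length=10, return_tensors="pt"
#     )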
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
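# Example (sketch): the language codes drive the special tokens, so switching
# `src_lang` re-prefixes every encoded sequence.
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # input_ids start with the en_XX language-code id and end with </s>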
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
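# Example (sketch, hypothetical file names): run under DeepSpeed via the accelerate CLI,
# which is when DummyOptim/DummyScheduler take over from AdamW and the linear scheduler.
#
#     accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#         --model_name_or_path bert-base-cased --num_epochs 3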
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name), organization="nandwalritik", commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
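# Example (sketch, hypothetical script name): converting the default tiny checkpoint.
#
#     python convert_swinv2_timm_to_pytorch.py \
#         --swinv2_name swinv2_tiny_patch4_window8_256 \
#         --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256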
def prefix_function(input_string: str) -> list:
    """
    Knuth-Morris-Pratt prefix function: for every position i, the length of the
    longest proper prefix of input_string[: i + 1] that is also a suffix of it.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """
    Return the largest value of the prefix function over the whole string.

    >>> longest_prefix("aabcdaabc")
    4
    """
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
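# Worked example: for "aabcdaabc" the prefix function is [0, 1, 0, 0, 0, 1, 2, 3, 4],
# so longest_prefix("aabcdaabc") == 4 because the prefix "aabc" also ends the string.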
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    # Pull the left child up; the rotated subtree's new root is returned.
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    # Mirror image of right_rotation: pull the right child up.
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
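# NOTE: insert_node and del_node both rebalance on the way back up the recursion,
# which is what keeps the tree height logarithmic; the print() calls are tracing aids.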
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
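# The lazy pattern above means `import transformers.models.wav2vec2` only builds the
# name table; accessing e.g. `Wav2Vec2Model` triggers the actual torch submodule
# import on first attribute lookup.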
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Count tile totals t <= t_limit that can be formed by between 1 and n_limit
    distinct hollow square laminae.
    """
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'''{solution() = }''')
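# Worked example: an outer square of width 6 with a 2x2 hole uses 6*6 - 2*2 = 32 tiles,
# so count[32] is incremented; the answer counts tile totals formable by between
# 1 and n_limit distinct laminae (Project Euler problem 174).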
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
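# Example: uncommenting print_words(root, "") in test_trie walks the trie depth-first
# and prints every stored word, e.g. "banana bananas band bandana beast apple all".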
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( snake_case ):
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , )-> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCamelCase_ = (
F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , _lowercase , standard_warn=_lowercase )
UpperCamelCase_ = dict(scheduler.config )
UpperCamelCase_ = 1
UpperCamelCase_ = FrozenDict(_lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
UpperCamelCase_ = (
F"The configuration file of this scheduler: {scheduler} has not set the configuration"
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , _lowercase , standard_warn=_lowercase )
UpperCamelCase_ = dict(scheduler.config )
UpperCamelCase_ = True
UpperCamelCase_ = FrozenDict(_lowercase )
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=_lowercase , segmentation_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , )
def UpperCAmelCase_ ( self , _lowercase = "auto" )-> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def UpperCAmelCase_ ( self )-> List[Any]:
self.enable_attention_slicing(_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self )-> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 50 , _lowercase = 7.5 , _lowercase = None , _lowercase = 1 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , **_lowercase , )-> List[Any]:
UpperCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
UpperCamelCase_ = self.segmentation_model(**_lowercase )
UpperCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCamelCase_ = self.numpy_to_pil(_lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , )
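# A minimal usage sketch for the pipeline above (illustrative addition). The
# checkpoint names are assumptions -- commonly used public checkpoints -- a CUDA
# device is assumed, and the input image path is a placeholder.
if __name__ == "__main__":
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",
        custom_pipeline="text_inpainting",
        segmentation_model=segmentation_model,
        segmentation_processor=processor,
    ).to("cuda")

    init_image = PIL.Image.open("example_input.png").convert("RGB").resize((512, 512))
    # everything matching `text` is segmented out and replaced by what `prompt` describes
    result = pipe(image=init_image, text="a glass table", prompt="a wooden bench").images[0]
    result.save("text_inpainting_result.png")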
| 60
|
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
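# A short illustration of composing the configs above (illustrative addition;
# the values checked are the defaults defined in this file):
if __name__ == "__main__":
    text_config = BridgeTowerTextConfig()
    vision_config = BridgeTowerVisionConfig()
    config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
    assert config.text_config.vocab_size == 50265
    assert config.vision_config.image_size == 288
    assert config.to_dict()["model_type"] == "bridgetower"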
| 60
| 1
|
"""simple docstring"""
def _A ( lowercase ):
"""simple docstring"""
if n_term == "":
return []
a =[]
for temp in range(int(lowercase ) ):
series.append(f'''1/{temp + 1}''' if series else '''1''' )
return series
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
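# Illustrative calls (added for reference; expected outputs shown as comments):
# harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']
# harmonic_series("")  -> []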
| 81
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
"""simple docstring"""
lowerCAmelCase__ :str = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F"Building PyTorch model from configuration: {config}" )
lowerCAmelCase__ :int = BertForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
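# Programmatic equivalent of the CLI above (illustrative addition; the paths
# are hypothetical placeholders):
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path="./bert_model.ckpt",
#     bert_config_file="./bert_config.json",
#     pytorch_dump_path="./pytorch_model.bin",
# )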
| 293
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
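# What the lazy module above enables (illustrative, assuming `transformers` is
# installed with PyTorch): the heavy modeling modules are imported only when an
# attribute is first accessed, e.g.
# from transformers import MT5Config, MT5ForConditionalGeneration
# model = MT5ForConditionalGeneration(MT5Config())  # modeling code loads here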
| 356
|
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our DeiT structure.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
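# Programmatic equivalent of the CLI (illustrative addition; the dump folder is
# a hypothetical path and the conversion downloads the timm checkpoint):
# convert_deit_checkpoint("vit_deit_base_distilled_patch16_224", "./deit-base-distilled-patch16-224")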
| 143
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class snake_case :
'''simple docstring'''
def __init__( self : Dict, _lowerCamelCase : Any ):
'''simple docstring'''
__A = data
__A = None
class snake_case :
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
__A = None
__A = None
def __iter__( self : Any ):
'''simple docstring'''
__A = self.head
while self.head:
yield node.data
__A = node.next
if node == self.head:
break
def __len__( self : int ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : str ):
'''simple docstring'''
return "->".join(str(_lowerCamelCase ) for item in iter(self ) )
def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : Any ):
'''simple docstring'''
self.insert_nth(len(self ), _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ):
'''simple docstring'''
self.insert_nth(0, _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : Any ):
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
__A = Node(_lowerCamelCase )
if self.head is None:
__A = new_node # first node points itself
__A = __A = new_node
elif index == 0: # insert at head
__A = self.head
__A = __A = new_node
else:
__A = self.head
for _ in range(index - 1 ):
__A = temp.next
__A = temp.next
__A = new_node
if index == len(self ) - 1: # insert at tail
__A = new_node
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
return self.delete_nth(0 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int = 0 ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
__A = self.head
if self.head == self.tail: # just one node
__A = __A = None
elif index == 0: # delete head node
__A = self.tail.next.next
__A = self.head.next
else:
__A = self.head
for _ in range(index - 1 ):
__A = temp.next
__A = temp.next
__A = temp.next.next
if index == len(self ) - 1: # delete at tail
__A = temp
return delete_node.data
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
return len(self ) == 0
def lowerCAmelCase ( ):
"""simple docstring"""
__A = CircularLinkedList()
assert len(__UpperCamelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(__UpperCamelCase ) == i
circular_linked_list.insert_nth(__UpperCamelCase , i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
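# A small round-robin scheduling sketch on top of the list above (illustrative
# addition, not part of the original module): rotate by deleting from the front
# and re-inserting at the tail.
def round_robin_demo() -> None:
    rr = CircularLinkedList()
    for task in ("a", "b", "c"):
        rr.insert_tail(task)
    for _ in range(4):
        task = rr.delete_front()
        print("serving", task)  # a, b, c, a
        rr.insert_tail(task)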
| 266
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ):
__A = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sgugger/tiny-distilbert-classification'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
# set architectures equal to `None`
__A = None
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = '''sshleifer/tinier_bart'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = '''sshleifer/tinier_bart'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ):
self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
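# A standalone benchmark run outside the test harness (illustrative; downloads
# the tiny checkpoint and measures inference only):
# args = PyTorchBenchmarkArguments(
#     models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#     sequence_lengths=[8], batch_sizes=[1], multi_process=False,
# )
# print(PyTorchBenchmark(args).run())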
| 266
| 1
|
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def _snake_case ( UpperCAmelCase_ : Union[str, Any] ):
A__ = {}
A__ = job["""started_at"""]
A__ = job["""completed_at"""]
A__ = date_parser.parse(UpperCAmelCase_ )
A__ = date_parser.parse(UpperCAmelCase_ )
A__ = round((end_datetime - start_datetime).total_seconds() / 60.0 )
A__ = start
A__ = end
A__ = duration_in_min
return job_info
def _snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None ):
A__ = None
if token is not None:
A__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
A__ = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
A__ = requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).json()
A__ = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(UpperCAmelCase_ ) for job in result["""jobs"""]} )
A__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCAmelCase_ ):
A__ = requests.get(url + F"""&page={i + 2}""" , headers=UpperCAmelCase_ ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(UpperCAmelCase_ ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE_ : int = get_job_time(args.workflow_run_id)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
| 359
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
SCREAMING_SNAKE_CASE_ : Tuple = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: int , *UpperCamelCase: Optional[Any] , **UpperCamelCase: str ):
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 69
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ) ->None:
"""simple docstring"""
snake_case_ = []
snake_case_ = 0
snake_case_ = 0
def lowerCAmelCase ( self : List[str] ) ->bool:
"""simple docstring"""
return self.head == self.tail
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any ) ->None:
"""simple docstring"""
self.data.append(UpperCAmelCase_ )
snake_case_ = self.tail + 1
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
snake_case_ = self.data[self.head]
snake_case_ = self.head + 1
return ret
def lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
return self.tail - self.head
def lowerCAmelCase ( self : str ) ->None:
"""simple docstring"""
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class __A :
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Any ) ->None:
"""simple docstring"""
snake_case_ = data
snake_case_ = None
snake_case_ = None
snake_case_ = 1
def lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
return self.data
def lowerCAmelCase ( self : Tuple ) ->MyNode | None:
"""simple docstring"""
return self.left
def lowerCAmelCase ( self : List[Any] ) ->MyNode | None:
"""simple docstring"""
return self.right
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
return self.height
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->None:
"""simple docstring"""
snake_case_ = data
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : MyNode | None ) ->None:
"""simple docstring"""
snake_case_ = node
def lowerCAmelCase ( self : str , UpperCAmelCase_ : MyNode | None ) ->None:
"""simple docstring"""
snake_case_ = node
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : int ) ->None:
"""simple docstring"""
snake_case_ = height
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return node.get_height()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if a > b:
return a
return b
def _a ( _SCREAMING_SNAKE_CASE ) -> MyNode:
print("""left rotation node:""" , node.get_data() )
snake_case_ = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(_SCREAMING_SNAKE_CASE )
snake_case_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_SCREAMING_SNAKE_CASE )
snake_case_ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_SCREAMING_SNAKE_CASE )
return ret
def _a ( _SCREAMING_SNAKE_CASE ) -> MyNode:
print("""right rotation node:""" , node.get_data() )
snake_case_ = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(_SCREAMING_SNAKE_CASE )
snake_case_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_SCREAMING_SNAKE_CASE )
snake_case_ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_SCREAMING_SNAKE_CASE )
return ret
def _a ( _SCREAMING_SNAKE_CASE ) -> MyNode:
snake_case_ = node.get_left()
assert left_child is not None
node.set_left(left_rotation(_SCREAMING_SNAKE_CASE ) )
return right_rotation(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> MyNode:
snake_case_ = node.get_right()
assert right_child is not None
node.set_right(right_rotation(_SCREAMING_SNAKE_CASE ) )
return left_rotation(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> MyNode | None:
if node is None:
return MyNode(_SCREAMING_SNAKE_CASE )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , _SCREAMING_SNAKE_CASE ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
snake_case_ = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
snake_case_ = right_rotation(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = lr_rotation(_SCREAMING_SNAKE_CASE )
else:
node.set_right(insert_node(node.get_right() , _SCREAMING_SNAKE_CASE ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
snake_case_ = node.get_right()
assert right_child is not None
if data < right_child.get_data():
snake_case_ = rl_rotation(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = left_rotation(_SCREAMING_SNAKE_CASE )
snake_case_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_SCREAMING_SNAKE_CASE )
return node
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
while True:
snake_case_ = root.get_right()
if right_child is None:
break
snake_case_ = right_child
return root.get_data()
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
while True:
snake_case_ = root.get_left()
if left_child is None:
break
snake_case_ = left_child
return root.get_data()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> MyNode | None:
snake_case_ = root.get_left()
snake_case_ = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
snake_case_ = get_left_most(_SCREAMING_SNAKE_CASE )
root.set_data(_SCREAMING_SNAKE_CASE )
root.set_right(del_node(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
elif left_child is not None:
snake_case_ = left_child
elif right_child is not None:
snake_case_ = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if get_height(_SCREAMING_SNAKE_CASE ) - get_height(_SCREAMING_SNAKE_CASE ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
snake_case_ = left_rotation(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = rl_rotation(_SCREAMING_SNAKE_CASE )
elif get_height(_SCREAMING_SNAKE_CASE ) - get_height(_SCREAMING_SNAKE_CASE ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
snake_case_ = right_rotation(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = lr_rotation(_SCREAMING_SNAKE_CASE )
snake_case_ = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(_SCREAMING_SNAKE_CASE )
return root
class __A :
'''simple docstring'''
def __init__( self : Dict ) ->None:
"""simple docstring"""
snake_case_ = None
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
return get_height(self.root )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Any ) ->None:
"""simple docstring"""
print("""insert:""" + str(UpperCAmelCase_ ) )
snake_case_ = insert_node(self.root , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Any ) ->None:
"""simple docstring"""
print("""delete:""" + str(UpperCAmelCase_ ) )
if self.root is None:
print("""Tree is empty!""" )
return
snake_case_ = del_node(self.root , UpperCAmelCase_ )
def __str__( self : Dict , ) ->str: # a level traversale, gives a more intuitive look on the tree
"""simple docstring"""
snake_case_ = """"""
snake_case_ = MyQueue()
q.push(self.root )
snake_case_ = self.get_height()
if layer == 0:
return output
snake_case_ = 0
while not q.is_empty():
snake_case_ = q.pop()
snake_case_ = """ """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(UpperCAmelCase_ )
q.push(UpperCAmelCase_ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
snake_case_ = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , UpperCAmelCase_ ) - 1:
snake_case_ = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def _a ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
__SCREAMING_SNAKE_CASE : Any = AVLtree()
__SCREAMING_SNAKE_CASE : Any = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
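# A quick balance check (illustrative addition): inserting 0..6 in ascending
# order would degenerate a plain BST to height 7, but the AVL rotations keep
# the tree at height 3, a perfectly balanced 7-node tree:
# balanced = AVLtree()
# for value in range(7):
#     balanced.insert(value)
# assert balanced.get_height() == 3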
| 347
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __A :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = rotary_dim
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = None
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = FlaxGPTJModelTester(self )
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@tooslow
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = False
snake_case_ = model.config.eos_token_id
snake_case_ = jax.jit(model.generate )
snake_case_ = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                    )
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                        self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                    with torch.no_grad():
                        pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                    self.assertEqual(
                        len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                    )
                    for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                        self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
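
# Hedged example (not part of the test suite above): a minimal, self-contained sketch of
# the PyTorch <-> Flax round trip that the two cross-tests exercise, built on the same
# `convert_pytorch_state_dict_to_flax` utility from `transformers.modeling_flax_pytorch_utils`.
# The tiny BERT config and input shape are arbitrary illustrative choices, not part of the
# tests; any architecture with both a PyTorch and a Flax implementation would do.
def _example_pt_flax_roundtrip():
    import numpy as np
    import torch
    from transformers import BertConfig, BertModel, FlaxBertModel
    from transformers.modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax

    config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
    pt_model = BertModel(config).eval()
    fx_model = FlaxBertModel(config)
    # copy the (randomly initialized) PyTorch weights into the Flax parameter tree
    fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)

    input_ids = np.ones((1, 8), dtype="i4")
    with torch.no_grad():
        pt_out = pt_model(torch.tensor(input_ids, dtype=torch.long)).last_hidden_state.numpy()
    fx_out = fx_model(input_ids).last_hidden_state
    # both frameworks should now agree up to numerical noise
    assert np.allclose(pt_out, fx_out, atol=4e-2)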
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}


class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
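
# Hedged usage sketch (illustrative, not part of the class above): loading the published
# "t5-small" checkpoint and inspecting the sentinel tokens. Requires network access or a
# locally cached checkpoint.
def _example_t5_tokenizer_fast():
    tokenizer = T5TokenizerFast.from_pretrained("t5-small")
    # `</s>` is appended automatically by build_inputs_with_special_tokens
    ids = tokenizer("Translate English to German: hello").input_ids
    assert ids[-1] == tokenizer.eos_token_id
    # the 100 <extra_id_*> sentinels are exposed as additional special tokens
    sentinels = tokenizer.get_sentinel_tokens()
    assert "<extra_id_0>" in sentinels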
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
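
# Hedged sketch (illustrative, not part of the module): the residual-mixing rule used in
# `forward` above, reproduced on dummy tensors so the combination logic is easy to see
# in isolation. Shapes are arbitrary stand-ins.
def _example_dual_transformer_mixing():
    import torch

    input_states = torch.randn(2, 4, 8)
    encoded_residual_0 = torch.randn_like(input_states)  # transformer output minus input
    encoded_residual_1 = torch.randn_like(input_states)
    mix_ratio = 0.5

    output = encoded_residual_0 * mix_ratio + encoded_residual_1 * (1 - mix_ratio)
    output = output + input_states  # add the residual connection back
    assert output.shape == input_states.shape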
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
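
# Hedged usage sketch (illustrative): extracting CLAP input features from one second of
# silence. The checkpoint-free constructor uses the defaults above (48 kHz, 64 mel bins,
# "fusion" truncation); the shapes printed are a consequence of those defaults.
def _example_clap_feature_extraction():
    feature_extractor = ClapFeatureExtractor()
    audio = np.zeros(48_000)  # 1 s of silence at the default sampling rate
    features = feature_extractor(audio, sampling_rate=48_000, return_tensors="np")
    # 4 stacked mel "views" per clip when truncation == "fusion"
    print(features["input_features"].shape, features["is_longer"])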
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
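
# Hedged usage sketch: a hand-checkable 2x2 case. For [[4, 7], [2, 6]] the determinant
# is 4*6 - 2*7 = 10, so the inverse is (1/10) * [[6, -7], [-2, 4]] = [[0.6, -0.7], [-0.2, 0.4]].
def _example_inverse_of_matrix():
    result = inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]])
    expected = [[0.6, -0.7], [-0.2, 0.4]]
    assert all(
        abs(a - b) < 1e-9 for row, exp in zip(result, expected) for a, b in zip(row, exp)
    )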
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
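
# Hedged usage sketch: building a one-residue Protein and rendering it as PDB text.
# The 37-atom layout (residue_constants.atom_type_num) follows the AlphaFold atom
# ordering; only the backbone N/CA/C atoms are marked present here.
def _example_protein_to_pdb():
    num_res = 1
    atom_mask = np.zeros((num_res, residue_constants.atom_type_num))
    for atom in ("N", "CA", "C"):
        atom_mask[0, residue_constants.atom_order[atom]] = 1.0

    prot = Protein(
        atom_positions=np.zeros((num_res, residue_constants.atom_type_num, 3)),
        atom_mask=atom_mask,
        aatype=np.zeros(num_res, dtype=np.int32),  # residue type 0 == alanine
        residue_index=np.arange(1, num_res + 1),
        b_factors=np.zeros((num_res, residue_constants.atom_type_num)),
    )
    print(to_pdb(prot))  # three ATOM records, a TER record, and END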
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
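
# Hedged usage sketch: building a CLIPTokenizer directly from toy vocab/merges files like
# the ones written in setUp above, outside the unittest harness. `tmpdir` is any writable
# directory supplied by the caller.
def _example_toy_clip_tokenizer(tmpdir):
    vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
    merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]

    vocab_file = os.path.join(tmpdir, "vocab.json")
    merges_file = os.path.join(tmpdir, "merges.txt")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(dict(zip(vocab, range(len(vocab))))) + "\n")
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))

    tokenizer = CLIPTokenizer(vocab_file, merges_file, unk_token="<unk>")
    # "lower" splits as lo + w + er</w> given the three merges above
    assert tokenizer.tokenize("lower newer") == ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]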
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights can exceed the 2 GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )

    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
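
# Hedged usage sketch (the paths and checkpoint name are placeholders): a typical
# invocation of this script is
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14
# after which the exported pipeline can be run on CPU:
def _example_run_exported_pipeline():
    pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd_onnx", provider="CPUExecutionProvider")
    image = pipe("a photo of an astronaut riding a horse").images[0]
    image.save("astronaut.png")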
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key, file):
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
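
# Hedged usage sketch: `get_dtype_size` above returns the per-element byte width that
# the sharding index uses to accumulate `total_size`.
def _example_dtype_sizes():
    assert get_dtype_size(torch.float32) == 4
    assert get_dtype_size(torch.float16) == 2
    assert get_dtype_size(torch.bool) == 1 / 8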
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : int , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = jnp.ones((batch_size, length)) / length
return scores
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : List[str] = 20
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_uniform_logits(batch_size=2 , length=lowercase_)
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_ : Tuple = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
SCREAMING_SNAKE_CASE_ : str = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_ : int = jax.nn.softmax(lowercase_ , axis=-1)
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_ : Dict = FlaxTemperatureLogitsWarper(temperature=1.3)
SCREAMING_SNAKE_CASE_ : Optional[int] = jax.nn.softmax(temp_dist_warper_sharper(lowercase_ , scores.copy() , cur_len=lowercase_) , axis=-1)
SCREAMING_SNAKE_CASE_ : List[Any] = jax.nn.softmax(temp_dist_warper_smoother(lowercase_ , scores.copy() , cur_len=lowercase_) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Optional[int] = 10
SCREAMING_SNAKE_CASE_ : Any = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.broadcast_to(np.arange(lowercase_)[None, :] , (batch_size, vocab_size)).copy()
SCREAMING_SNAKE_CASE_ : Optional[int] = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_ : int = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_ : Optional[int] = top_k_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
SCREAMING_SNAKE_CASE_ : Optional[int] = 5
SCREAMING_SNAKE_CASE_ : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
SCREAMING_SNAKE_CASE_ : List[str] = np.broadcast_to(np.arange(lowercase_)[None, :] , (batch_size, length)).copy()
SCREAMING_SNAKE_CASE_ : Tuple = top_k_warp_safety_check(lowercase_ , lowercase_ , cur_len=lowercase_)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : int = 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_ : List[str] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxTopPLogitsWarper(0.8)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.exp(top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3))
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_ : List[str] = np.broadcast_to(np.arange(lowercase_)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_ : Optional[int] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_ : str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
SCREAMING_SNAKE_CASE_ : int = top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = 20
SCREAMING_SNAKE_CASE_ : Optional[Any] = 4
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase_)
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_ : str = ids_tensor((batch_size, 20) , vocab_size=20)
SCREAMING_SNAKE_CASE_ : Any = 5
SCREAMING_SNAKE_CASE_ : List[Any] = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = min_dist_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''')])
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_ : int = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = 15
SCREAMING_SNAKE_CASE_ : Union[str, Any] = min_dist_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertFalse(jnp.isinf(lowercase_).any())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = 20
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase_)
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((batch_size, 1) , vocab_size=20)
SCREAMING_SNAKE_CASE_ : Dict = 1
SCREAMING_SNAKE_CASE_ : List[Any] = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = logits_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = logits_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertFalse(jnp.isinf(lowercase_).any())
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = 20
SCREAMING_SNAKE_CASE_ : str = 4
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : List[str] = 5
SCREAMING_SNAKE_CASE_ : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase_ , eos_token_id=lowercase_)
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor((batch_size, 4) , vocab_size=20)
SCREAMING_SNAKE_CASE_ : Optional[Any] = 4
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logits_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_ : Optional[int] = 3
SCREAMING_SNAKE_CASE_ : Any = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = logits_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertFalse(jnp.isinf(lowercase_).any())
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = 4
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 15
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor((batch_size, sequence_length) , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE_ : Dict = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : Dict = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_ : List[str] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase_ , eos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 10
# no processor list
SCREAMING_SNAKE_CASE_ : Tuple = temp_dist_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = top_k_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : int = min_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = bos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : str = eos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
# with processor list
SCREAMING_SNAKE_CASE_ : List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_ : Dict = processor(lowercase_ , lowercase_ , cur_len=lowercase_)
# scores should be equal
self.assertTrue(jnp.allclose(lowercase_ , lowercase_ , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = 4
SCREAMING_SNAKE_CASE_ : int = 10
SCREAMING_SNAKE_CASE_ : List[Any] = 15
SCREAMING_SNAKE_CASE_ : str = 2
SCREAMING_SNAKE_CASE_ : List[str] = 1
SCREAMING_SNAKE_CASE_ : str = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor((batch_size, sequence_length) , lowercase_)
SCREAMING_SNAKE_CASE_ : int = input_ids.copy()
SCREAMING_SNAKE_CASE_ : str = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : str = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_ : List[str] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : str = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase_ , eos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : str = 10
# no processor list
def run_no_processor_list(lowercase_ : str , lowercase_ : Tuple , lowercase_ : str):
SCREAMING_SNAKE_CASE_ : Tuple = temp_dist_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : int = top_k_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = min_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = bos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = eos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
return scores
# with processor list
def run_processor_list(lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Any):
SCREAMING_SNAKE_CASE_ : int = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(lowercase_ , lowercase_ , cur_len=lowercase_)
return scores
SCREAMING_SNAKE_CASE_ : List[str] = jax.jit(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.jit(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = jitted_run_no_processor_list(lowercase_ , lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : int = jitted_run_processor_list(lowercase_ , lowercase_ , lowercase_)
# scores should be equal
self.assertTrue(jnp.allclose(lowercase_ , lowercase_ , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
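# --- Illustrative sketch (not part of the test suite, and not the Flax
# implementations themselves): the warpers exercised above transform
# next-token logits before sampling. A minimal NumPy version of the three
# transforms; the helper names below are hypothetical, not library APIs.
import numpy as np

def temperature_warp(logits: np.ndarray, temperature: float) -> np.ndarray:
    # temperature < 1 sharpens the distribution, > 1 flattens it
    return logits / temperature

def top_k_warp(logits: np.ndarray, k: int, filter_value: float = -np.inf) -> np.ndarray:
    # keep the k largest logits per row (ties at the k-th value survive), mask the rest
    out = logits.copy()
    kth = np.sort(out, axis=-1)[:, -k][:, None]
    out[out < kth] = filter_value
    return out

def top_p_warp(logits: np.ndarray, p: float, filter_value: float = -np.inf) -> np.ndarray:
    # keep the smallest set of tokens whose cumulative probability reaches p
    out = logits.copy()
    order = np.argsort(out, axis=-1)[:, ::-1]
    sorted_logits = np.take_along_axis(out, order, axis=-1)
    probs = np.exp(sorted_logits - sorted_logits.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    cumulative = probs.cumsum(axis=-1)
    # tokens strictly after the cumulative-probability cutoff are removed
    remove = cumulative - probs >= p
    np.put_along_axis(out, order, np.where(remove, filter_value, sorted_logits), axis=-1)
    return out

if __name__ == "__main__":
    logits = np.log(np.array([[0.3, 0.1, 0.1, 0.5]]))
    # Mirrors the top-p test above: only the 0.5 and 0.3 tokens survive at p=0.8.
    print(np.exp(top_p_warp(logits, 0.8)))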
"""simple docstring"""
from itertools import permutations
def _A (__a ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
SCREAMING_SNAKE_CASE_ : List[str] = [7, 11, 13, 17]
for i, test in enumerate(__a ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def _A (__a = 10 ) -> int:
"""simple docstring"""
return sum(
int(''''''.join(map(__a , __a ) ) )
for num in permutations(range(__a ) )
if is_substring_divisible(__a ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
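
# Sanity check (illustrative): 1406357289, the example from the Project Euler
# statement, is 0-9 pandigital and substring divisible.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))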
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next factor of n
        while n % i != 0:
            i += 1
        ans = i
        # divide that factor out completely before moving on
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
@property
    def src_lang( self) -> str:
return self._src_lang
@src_lang.setter
    def src_lang( self, new_src_lang) -> None:
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs( self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs) -> BatchEncoding:
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding:
snake_case_ = src_lang
snake_case_ = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__)
    def _switch_to_input_mode( self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode( self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens( self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens( self, tgt_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary( self, save_directory, filename_prefix = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
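
# --- Illustrative usage (a sketch; running it downloads tokenizer files from
# the Hugging Face Hub) ---
if __name__ == "__main__":
    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    encoded = tokenizer("How are you?")
    # With legacy_behaviour=False the sequence is [eng_Latn, ..., </s>]
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))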
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowercase ( nn.Module ):
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 0.0
UpperCAmelCase = 1
UpperCAmelCase = 1
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = jnp.floataa
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : str = []
_UpperCAmelCase : Dict = []
for i in range(self.num_layers ):
_UpperCAmelCase : Optional[int] = self.in_channels if i == 0 else self.out_channels
_UpperCAmelCase : Dict = FlaxResnetBlockaD(
in_channels=a_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(a_ )
_UpperCAmelCase : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(a_ )
_UpperCAmelCase : Dict = resnets
_UpperCAmelCase : Any = attentions
if self.add_downsample:
_UpperCAmelCase : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,a_ ,a_ ,a_ ,a_=True ) -> Any:
_UpperCAmelCase : Optional[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
_UpperCAmelCase : Any = resnet(a_ ,a_ ,deterministic=a_ )
_UpperCAmelCase : List[str] = attn(a_ ,a_ ,deterministic=a_ )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase : Any = self.downsamplers_a(a_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase ( nn.Module ):
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 0.0
UpperCAmelCase = 1
UpperCAmelCase = True
UpperCAmelCase = jnp.floataa
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Optional[int] = []
for i in range(self.num_layers ):
_UpperCAmelCase : str = self.in_channels if i == 0 else self.out_channels
_UpperCAmelCase : List[str] = FlaxResnetBlockaD(
in_channels=a_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(a_ )
_UpperCAmelCase : Optional[int] = resnets
if self.add_downsample:
_UpperCAmelCase : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,a_ ,a_ ,a_=True ) -> str:
_UpperCAmelCase : str = ()
for resnet in self.resnets:
_UpperCAmelCase : Optional[Any] = resnet(a_ ,a_ ,deterministic=a_ )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase : int = self.downsamplers_a(a_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase ( nn.Module ):
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 0.0
UpperCAmelCase = 1
UpperCAmelCase = 1
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = jnp.floataa
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : List[Any] = []
for i in range(self.num_layers ):
_UpperCAmelCase : Tuple = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_UpperCAmelCase : int = self.prev_output_channel if i == 0 else self.out_channels
_UpperCAmelCase : Optional[int] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(a_ )
_UpperCAmelCase : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(a_ )
_UpperCAmelCase : List[str] = resnets
_UpperCAmelCase : Tuple = attentions
if self.add_upsample:
_UpperCAmelCase : int = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,a_ ,a_ ,a_ ,a_ ,a_=True ) -> Optional[Any]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
_UpperCAmelCase : Dict = res_hidden_states_tuple[-1]
_UpperCAmelCase : str = res_hidden_states_tuple[:-1]
_UpperCAmelCase : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
_UpperCAmelCase : List[str] = resnet(a_ ,a_ ,deterministic=a_ )
_UpperCAmelCase : str = attn(a_ ,a_ ,deterministic=a_ )
if self.add_upsample:
_UpperCAmelCase : Optional[int] = self.upsamplers_a(a_ )
return hidden_states
class lowercase ( nn.Module ):
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 0.0
UpperCAmelCase = 1
UpperCAmelCase = True
UpperCAmelCase = jnp.floataa
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : Union[str, Any] = []
for i in range(self.num_layers ):
_UpperCAmelCase : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_UpperCAmelCase : str = self.prev_output_channel if i == 0 else self.out_channels
_UpperCAmelCase : List[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(a_ )
_UpperCAmelCase : Dict = resnets
if self.add_upsample:
_UpperCAmelCase : Dict = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,a_ ,a_ ,a_ ,a_=True ) -> int:
for resnet in self.resnets:
# pop res hidden states
_UpperCAmelCase : Any = res_hidden_states_tuple[-1]
_UpperCAmelCase : List[str] = res_hidden_states_tuple[:-1]
_UpperCAmelCase : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
_UpperCAmelCase : Dict = resnet(a_ ,a_ ,deterministic=a_ )
if self.add_upsample:
_UpperCAmelCase : str = self.upsamplers_a(a_ )
return hidden_states
class lowercase ( nn.Module ):
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 0.0
UpperCAmelCase = 1
UpperCAmelCase = 1
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = jnp.floataa
def _snake_case ( self ) -> List[str]:
# there is always at least one resnet
_UpperCAmelCase : Optional[int] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
_UpperCAmelCase : List[Any] = []
for _ in range(self.num_layers ):
_UpperCAmelCase : str = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(a_ )
_UpperCAmelCase : List[Any] = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(a_ )
_UpperCAmelCase : Optional[Any] = resnets
_UpperCAmelCase : List[str] = attentions
def __call__( self ,a_ ,a_ ,a_ ,a_=True ) -> List[Any]:
_UpperCAmelCase : Any = self.resnets[0](a_ ,a_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
_UpperCAmelCase : int = attn(a_ ,a_ ,deterministic=a_ )
_UpperCAmelCase : List[Any] = resnet(a_ ,a_ ,deterministic=a_ )
return hidden_states
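# --- Illustrative sketch (not part of the library) of the skip-connection
# pattern above: down blocks stash every hidden state in `output_states`,
# and up blocks pop them off the end of the tuple and concatenate along the
# channel axis. Toy arrays only; the shapes and the +1.0 "resnet" stand-in
# are hypothetical.
def _toy_down(hidden):
    skips = ()
    for _ in range(2):
        hidden = hidden + 1.0  # stand-in for a resnet block
        skips += (hidden,)
    return hidden, skips


def _toy_up(hidden, skips):
    for _ in range(2):
        res = skips[-1]  # pop the most recently stashed state
        skips = skips[:-1]
        hidden = jnp.concatenate((hidden, res), axis=-1)  # channel-wise concat
        hidden = hidden[..., : res.shape[-1]]  # stand-in for a channel-reducing resnet
    return hidden


if __name__ == "__main__":
    h, skips = _toy_down(jnp.zeros((1, 8, 8, 4)))
    print(_toy_up(h, skips).shape)  # (1, 8, 8, 4)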
"""Sieve of Eratosthenes: return all primes up to a given number."""
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return the list of all primes up to and including num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start]:
            prime.append(start)
            # Mark multiples of start as composite
            for i in range(start * start, num + 1, start):
                if sieve[i]:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j]:
            prime.append(j)
    return prime
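
# Sanity check (illustrative) on a small input:
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]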
if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
    'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class XLNetConfig(PretrainedConfig):
'''simple docstring'''
    model_type = 'xlnet'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',  # Backward compatibility
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=3_2_0_0_0 , d_model=1_0_2_4 , n_layer=2_4 , n_head=1_6 , d_inner=4_0_9_6 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=5_1_2 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , FutureWarning , )
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ) -> int:
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit." )
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )
def A ( self : int , _a : "Image" , _a : str ) -> Optional[Any]:
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=_a , return_tensors='pt' )
def A ( self : Dict , _a : Dict ) -> str:
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE =self.model(**_a ).logits
return logits
def A ( self : Any , _a : str ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =outputs.cpu().detach().numpy()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =1
return Image.fromarray((array * 255).astype(np.uinta ) )
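# --- Illustrative usage (a sketch; instantiating the tool downloads the
# CIDAS/clipseg-rd64-refined checkpoint on first use, and the keyword call
# below assumes PipelineTool's encode -> forward -> decode flow) ---
if __name__ == "__main__":
    tool = ImageSegmentationTool()
    image = Image.new("RGB", (352, 352), "white")
    mask = tool(image=image, label="a cat")
    print(mask.size)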
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , ) -> Optional[Any]:
UpperCamelCase__ : int = bnb_quantization_config.load_in_abit
UpperCamelCase__ : List[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
UpperCamelCase__ : List[Any] = []
# custom device map
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(device_map.keys() ) > 1:
UpperCamelCase__ : int = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCamelCase__ : int = get_keys_to_not_convert(__lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__lowerCAmelCase )
UpperCamelCase__ : Tuple = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowerCAmelCase )
# compatibility with peft
UpperCamelCase__ : Union[str, Any] = load_in_abit
UpperCamelCase__ : str = load_in_abit
UpperCamelCase__ : Tuple = get_parameter_device(__lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
UpperCamelCase__ : int = replace_with_bnb_layers(__lowerCAmelCase , __lowerCAmelCase , modules_to_not_convert=__lowerCAmelCase )
# convert param to the right dtype
UpperCamelCase__ : Optional[Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCamelCase__ : List[Any] = name.replace(".weight" , "" ).replace(".bias" , "" )
UpperCamelCase__ : Union[str, Any] = getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowerCAmelCase ):
param.to(__lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
UpperCamelCase__ : Optional[int] = replace_with_bnb_layers(
__lowerCAmelCase , __lowerCAmelCase , modules_to_not_convert=__lowerCAmelCase )
UpperCamelCase__ : Optional[int] = get_quantized_model_device_map(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , max_memory=__lowerCAmelCase , no_split_module_classes=__lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCamelCase__ : Optional[Any] = True
UpperCamelCase__ : int = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCAmelCase , offload_state_dict=__lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__lowerCAmelCase , device_map=__lowerCAmelCase , offload_dir=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> int:
if device_map is None:
if torch.cuda.is_available():
UpperCamelCase__ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
UpperCamelCase__ : Optional[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCamelCase__ : Any = {}
UpperCamelCase__ : Optional[int] = special_dtypes
UpperCamelCase__ : str = no_split_module_classes
UpperCamelCase__ : Optional[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCamelCase__ : str = get_balanced_memory(
__lowerCAmelCase , low_zero=(device_map == "balanced_low_0") , max_memory=__lowerCAmelCase , **__lowerCAmelCase , )
UpperCamelCase__ : int = max_memory
UpperCamelCase__ : Any = infer_auto_device_map(__lowerCAmelCase , **__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
# check if don't have any quantized module on the cpu
UpperCamelCase__ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCamelCase__ : List[str] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> Tuple:
if modules_to_not_convert is None:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Tuple = _replace_with_bnb_layers(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Any:
UpperCamelCase__ : str = False
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase__ : Tuple = []
current_key_name.append(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCamelCase__ : Optional[Any] = ".".join(__lowerCAmelCase )
UpperCamelCase__ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCamelCase__ : List[Any] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCamelCase__ : str = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCamelCase__ : int = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
UpperCamelCase__ : Optional[Any] = module.weight.data
if module.bias is not None:
UpperCamelCase__ : Optional[Any] = module.bias.data
bnb_module.requires_grad_(__lowerCAmelCase )
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = True
if len(list(module.children() ) ) > 0:
UpperCamelCase__ : int = _replace_with_bnb_layers(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase__ : Any = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]:
# Create a copy of the model
with init_empty_weights():
UpperCamelCase__ : Tuple = deepcopy(__lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
UpperCamelCase__ : str = find_tied_parameters(__lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ : Optional[int] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase__ : str = sum(__lowerCAmelCase , [] )
UpperCamelCase__ : List[Any] = len(__lowerCAmelCase ) > 0
# Check if it is a base model
UpperCamelCase__ : Any = False
if hasattr(__lowerCAmelCase , "base_model_prefix" ):
UpperCamelCase__ : int = not hasattr(__lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase__ : Tuple = list(model.named_children() )
UpperCamelCase__ : List[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase__ : List[str] = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
UpperCamelCase__ : Optional[int] = list(set(__lowerCAmelCase ) ) + list(__lowerCAmelCase )
# remove ".weight" from the keys
UpperCamelCase__ : List[str] = [".weight", ".bias"]
UpperCamelCase__ : List[str] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase__ : str = name.replace(__lowerCAmelCase , "" )
filtered_module_names.append(__lowerCAmelCase )
return filtered_module_names
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[int]:
for m in model.modules():
if isinstance(__lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict:
return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__lowerCAmelCase , __lowerCAmelCase , 0 , dtype=__lowerCAmelCase , value=__lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = param_name
UpperCamelCase__ : Optional[Any] = model
if "." in tensor_name:
UpperCamelCase__ : Dict = tensor_name.split("." )
for split in splits[:-1]:
UpperCamelCase__ : Union[str, Any] = getattr(__lowerCAmelCase , __lowerCAmelCase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase__ : Any = new_module
UpperCamelCase__ : Dict = splits[-1]
# offload weights
UpperCamelCase__ : str = False
offload_weight(module._parameters[tensor_name] , __lowerCAmelCase , __lowerCAmelCase , index=__lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , "SCB" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , __lowerCAmelCase , index=__lowerCAmelCase , )
else:
offload_weight(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , index=__lowerCAmelCase )
offload_weight(__lowerCAmelCase , param_name.replace("weight" , "SCB" ) , __lowerCAmelCase , index=__lowerCAmelCase )
set_module_tensor_to_device(__lowerCAmelCase , __lowerCAmelCase , "meta" , dtype=__lowerCAmelCase , value=torch.empty(*param.size() ) )
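# --- Illustrative usage (a sketch assuming the public `accelerate` API;
# it needs a CUDA GPU, `bitsandbytes`, and a real weights folder) ---
if __name__ == "__main__":
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("gpt2")
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(config)
    bnb_config = BnbQuantizationConfig(load_in_8bit=True)
    quantized = load_and_quantize_model(
        empty_model,
        bnb_config,
        weights_location="path/to/gpt2/weights",  # hypothetical local folder
        device_map="auto",
    )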
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start: str, goal: str) -> list:
    """Return the shortest path between start and goal using breadth-first search."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start: str, target: str) -> int:
    """Return the number of edges on the shortest path between start and target."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
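
# --- A note on the list-as-queue pattern above (illustrative) ---
# list.pop(0) is O(n); collections.deque gives O(1) pops on larger graphs.
# A minimal deque-based variant of the distance search:
from collections import deque


def bfs_distance_deque(graph: dict, start: str, target: str) -> int:
    if start not in graph or target not in graph:
        return -1
    queue = deque([(start, 0)])
    seen = {start}
    while queue:
        node, distance = queue.popleft()
        if node == target:
            return distance
        for neighbour in graph[node]:
            if neighbour not in seen:
                seen.add(neighbour)
                queue.append((neighbour, distance + 1))
    return -1


assert bfs_distance_deque(demo_graph, "G", "D") == 4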
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename) -> dict:
    # Parse a label file into {line_number: first_word}; used as an id2label map.
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type) -> None:
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("." ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def rename_dict(key, value, full_name, weight_type, hf_dict) -> None:
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None) -> bool:
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split("." )[-2]
                mapped_key = mapped_key.replace("*" , layer_index )
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> None:
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
a = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=False ) -> List[Any]:
if config_path is not None:
a = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
a = WavaVecaConfig()
if is_seq_class:
a = read_txt_into_dict(__lowerCamelCase )
a = idalabel
a = WavaVecaForSequenceClassification(__lowerCamelCase )
a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
a = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a = target_dict.pad_index
a = target_dict.bos_index
a = target_dict.eos_index
a = len(target_dict.symbols )
a = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
a = target_dict.indices
# fairseq has the <pad> and <s> switched
a = 0
a = 1
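            # with fairseq's default symbol order (<s>=0, <pad>=1, </s>=2, <unk>=3), the two
            # assignments above give the CTC blank token <pad> id 0 and <s> id 1, mirroring
            # the bos/pad swap applied to the config earlier in this branch.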
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
a = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
        a = config.feat_extract_norm == """layer"""
a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
a = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
a = WavaVecaForCTC(__lowerCamelCase )
else:
a = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
a = argparse.Namespace(task="""audio_pretraining""" )
a = fairseq.tasks.setup_task(__lowerCamelCase )
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
a = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__UpperCamelCase : Union[str, Any] = parser.parse_args()
__UpperCamelCase : Any = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
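# Example invocation (a sketch; the script name and paths are hypothetical, the flags
# come from the argparse definition above):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned
# Fine-tuned CTC checkpoints additionally need --dict_path pointing at the fairseq
# dictionary file used during fine-tuning.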
| 228
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = ShapEImgaImgPipeline
UpperCamelCase__ = ['''image''']
UpperCamelCase__ = ['''image''']
UpperCamelCase__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase__ = False
@property
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
return 8
@property
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
a = CLIPVisionModel(__magic_name__ )
return model
@property
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__magic_name__ , do_normalize=__magic_name__ , do_resize=__magic_name__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
a = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
a = PriorTransformer(**__magic_name__ )
return model
@property
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
a = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
a = ShapERenderer(**__magic_name__ )
return model
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.dummy_prior
a = self.dummy_image_encoder
a = self.dummy_image_processor
a = self.dummy_renderer
a = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=__magic_name__ , clip_sample=__magic_name__ , clip_sample_range=1.0 , )
a = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :str , __magic_name__ :Tuple=0 ):
'''simple docstring'''
a = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
if str(__magic_name__ ).startswith("""mps""" ):
a = torch.manual_seed(__magic_name__ )
else:
a = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
a = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = """cpu"""
a = self.get_dummy_components()
a = self.pipeline_class(**__magic_name__ )
a = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
a = pipe(**self.get_dummy_inputs(__magic_name__ ) )
a = output.images[0]
a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = torch_device == """cpu"""
a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__magic_name__ , relax_max_difference=__magic_name__ , )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.get_dummy_components()
a = self.pipeline_class(**__magic_name__ )
a = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
a = 1
a = 2
a = self.get_dummy_inputs(__magic_name__ )
for key in inputs.keys():
if key in self.batch_params:
a = batch_size * [inputs[key]]
a = pipe(**__magic_name__ , num_images_per_prompt=__magic_name__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
a = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
a = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
a = torch.Generator(device=__magic_name__ ).manual_seed(0 )
a = pipe(
__magic_name__ , generator=__magic_name__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
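# A minimal end-to-end sketch distilled from the slow test above (same checkpoint id,
# image URL, and call arguments; the "cuda" device is an assumption):
#   pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
#   image = load_image(
#       "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
#   )
#   frames = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64,
#                 output_type="np").images[0]  # (20, 64, 64, 3) array of rendered views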
| 228
| 1
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 355
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = '''mobilenet_v1'''
def __init__( self : int , _a : Tuple=3 , _a : str=2_2_4 , _a : Dict=1.0 , _a : List[Any]=8 , _a : Tuple="relu6" , _a : Dict=True , _a : Optional[int]=0.9_9_9 , _a : List[Any]=0.0_2 , _a : Optional[Any]=0.0_0_1 , **_a : Optional[int] , ):
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
a__: str =num_channels
a__: Union[str, Any] =image_size
a__: Dict =depth_multiplier
a__: Union[str, Any] =min_depth
a__: Any =hidden_act
a__: int =tf_padding
a__: Dict =classifier_dropout_prob
a__: Any =initializer_range
a__: List[str] =layer_norm_eps
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self : int ):
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def _lowerCamelCase ( self : Tuple ):
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def _lowerCamelCase ( self : Dict ):
return 1e-4
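# A minimal usage sketch (assuming the upstream transformers names MobileNetV1Config /
# MobileNetV1Model for the classes defined above):
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   model = MobileNetV1Model(config)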
| 42
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
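# In the upstream pattern this `_LazyModule` instance is installed as
# `sys.modules[__name__]`, so the torch-backed imports declared in `_import_structure`
# are only executed when one of the listed attributes is first accessed.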
| 188
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ : int = logging.get_logger(__name__)
A_ : Tuple = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class a_ ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = "deta"
lowerCamelCase__ : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=9_0_0, lowerCamelCase_=2_0_4_8, lowerCamelCase_=6, lowerCamelCase_=2_0_4_8, lowerCamelCase_=8, lowerCamelCase_=6, lowerCamelCase_=1_0_2_4, lowerCamelCase_=8, lowerCamelCase_=0.0, lowerCamelCase_=True, lowerCamelCase_="relu", lowerCamelCase_=2_5_6, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=1.0, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_="sine", lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=4, lowerCamelCase_=True, lowerCamelCase_=3_0_0, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=1, lowerCamelCase_=5, lowerCamelCase_=2, lowerCamelCase_=1, lowerCamelCase_=1, lowerCamelCase_=5, lowerCamelCase_=2, lowerCamelCase_=0.1, lowerCamelCase_=0.25, **lowerCamelCase_, ):
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCamelCase__ : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(a__, a__ ):
lowerCamelCase__ : int = backbone_config.pop('model_type' )
lowerCamelCase__ : int = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : Optional[Any] = config_class.from_dict(a__ )
lowerCamelCase__ : Any = backbone_config
lowerCamelCase__ : Optional[Any] = num_queries
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Optional[Any] = encoder_ffn_dim
lowerCamelCase__ : int = encoder_layers
lowerCamelCase__ : Dict = encoder_attention_heads
lowerCamelCase__ : List[str] = decoder_ffn_dim
lowerCamelCase__ : Optional[int] = decoder_layers
lowerCamelCase__ : Dict = decoder_attention_heads
lowerCamelCase__ : Dict = dropout
lowerCamelCase__ : Optional[int] = attention_dropout
lowerCamelCase__ : Tuple = activation_dropout
lowerCamelCase__ : Optional[Any] = activation_function
lowerCamelCase__ : Optional[Any] = init_std
lowerCamelCase__ : int = init_xavier_std
lowerCamelCase__ : List[Any] = encoder_layerdrop
lowerCamelCase__ : List[Any] = auxiliary_loss
lowerCamelCase__ : List[Any] = position_embedding_type
# deformable attributes
lowerCamelCase__ : Tuple = num_feature_levels
lowerCamelCase__ : str = encoder_n_points
lowerCamelCase__ : List[str] = decoder_n_points
lowerCamelCase__ : Any = two_stage
lowerCamelCase__ : Any = two_stage_num_proposals
lowerCamelCase__ : Tuple = with_box_refine
lowerCamelCase__ : Tuple = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowerCamelCase__ : Optional[Any] = class_cost
lowerCamelCase__ : Optional[Any] = bbox_cost
lowerCamelCase__ : Dict = giou_cost
# Loss coefficients
lowerCamelCase__ : Any = mask_loss_coefficient
lowerCamelCase__ : Optional[Any] = dice_loss_coefficient
lowerCamelCase__ : Optional[Any] = bbox_loss_coefficient
lowerCamelCase__ : Optional[int] = giou_loss_coefficient
lowerCamelCase__ : Optional[int] = eos_coefficient
lowerCamelCase__ : Union[str, Any] = focal_alpha
super().__init__(is_encoder_decoder=a__, **a__ )
@property
def a__ (self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def a__ (self ):
'''simple docstring'''
return self.d_model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = copy.deepcopy(self.__dict__ )
lowerCamelCase__ : Optional[Any] = self.backbone_config.to_dict()
lowerCamelCase__ : List[Any] = self.__class__.model_type
return output
| 364
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
A_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ : str = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def lowerCamelCase_ ( _lowerCamelCase ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    lowerCamelCase__ : List[Any] = dict((re.sub(r'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , k ), v) for k, v in d.items() )
lowerCamelCase__ : int = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
lowerCamelCase__ : List[str] = d[k] # restore
return da
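# Quick sanity check mirroring the example in the comment above (the upstream name of
# this helper is rewrite_dict_keys; all four special tokens must be present, since the
# restore loop deletes their '</w>' variants):
#   rewrite_dict_keys({'le@@': 5, 'er': 6, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3})
#   -> {'le': 5, 'er</w>': 6, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}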
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
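# Example invocation (a sketch; the dump directory layout is hypothetical but must
# contain the checkpoint alongside its dict.*.txt and bpecodes files, as the code
# above requires):
#   python convert_fsmt_checkpoint.py \
#       --fsmt_checkpoint_path ./wmt19.ru-en/model4.pt \
#       --pytorch_dump_folder_path ./fsmt-wmt19-ru-en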
| 316
| 0
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCAmelCase__ ( nn.Module):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = jnp.floataa
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = []
__UpperCamelCase = []
for i in range(self.num_layers ):
__UpperCamelCase = self.in_channels if i == 0 else self.out_channels
__UpperCamelCase = FlaxResnetBlockaD(
in_channels=lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase )
__UpperCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase )
__UpperCamelCase = resnets
__UpperCamelCase = attentions
if self.add_downsample:
__UpperCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowercase , lowercase , lowercase , lowercase=True ) -> Dict:
__UpperCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__UpperCamelCase = resnet(lowercase , lowercase , deterministic=lowercase )
__UpperCamelCase = attn(lowercase , lowercase , deterministic=lowercase )
output_states += (hidden_states,)
if self.add_downsample:
__UpperCamelCase = self.downsamplers_a(lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase__ ( nn.Module):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = jnp.floataa
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = []
for i in range(self.num_layers ):
__UpperCamelCase = self.in_channels if i == 0 else self.out_channels
__UpperCamelCase = FlaxResnetBlockaD(
in_channels=lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase )
__UpperCamelCase = resnets
if self.add_downsample:
__UpperCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowercase , lowercase , lowercase=True ) -> Any:
__UpperCamelCase = ()
for resnet in self.resnets:
__UpperCamelCase = resnet(lowercase , lowercase , deterministic=lowercase )
output_states += (hidden_states,)
if self.add_downsample:
__UpperCamelCase = self.downsamplers_a(lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase__ ( nn.Module):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = jnp.floataa
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = []
__UpperCamelCase = []
for i in range(self.num_layers ):
__UpperCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__UpperCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__UpperCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase )
__UpperCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase )
__UpperCamelCase = resnets
__UpperCamelCase = attentions
if self.add_upsample:
__UpperCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowercase , lowercase , lowercase , lowercase , lowercase=True ) -> Any:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__UpperCamelCase = res_hidden_states_tuple[-1]
__UpperCamelCase = res_hidden_states_tuple[:-1]
__UpperCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__UpperCamelCase = resnet(lowercase , lowercase , deterministic=lowercase )
__UpperCamelCase = attn(lowercase , lowercase , deterministic=lowercase )
if self.add_upsample:
__UpperCamelCase = self.upsamplers_a(lowercase )
return hidden_states
class UpperCAmelCase__ ( nn.Module):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = jnp.floataa
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = []
for i in range(self.num_layers ):
__UpperCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__UpperCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__UpperCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase )
__UpperCamelCase = resnets
if self.add_upsample:
__UpperCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowercase , lowercase , lowercase , lowercase=True ) -> Tuple:
for resnet in self.resnets:
# pop res hidden states
__UpperCamelCase = res_hidden_states_tuple[-1]
__UpperCamelCase = res_hidden_states_tuple[:-1]
__UpperCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__UpperCamelCase = resnet(lowercase , lowercase , deterministic=lowercase )
if self.add_upsample:
__UpperCamelCase = self.upsamplers_a(lowercase )
return hidden_states
class UpperCAmelCase__ ( nn.Module):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = jnp.floataa
def __lowerCamelCase ( self ) -> Optional[Any]:
# there is always at least one resnet
__UpperCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__UpperCamelCase = []
for _ in range(self.num_layers ):
__UpperCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase )
__UpperCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase )
__UpperCamelCase = resnets
__UpperCamelCase = attentions
def __call__( self , lowercase , lowercase , lowercase , lowercase=True ) -> Dict:
__UpperCamelCase = self.resnets[0](lowercase , lowercase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__UpperCamelCase = attn(lowercase , lowercase , deterministic=lowercase )
__UpperCamelCase = resnet(lowercase , lowercase , deterministic=lowercase )
return hidden_states
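# Together these modules form the Flax UNet skeleton: the down blocks collect each
# intermediate tensor into `output_states` as skip connections, the up blocks pop them
# back off `res_hidden_states_tuple` in reverse order and concatenate along the channel
# axis, and the mid block applies resnet/attention pairs at the bottleneck.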
| 349
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = SqueezeBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**lowercase )
__UpperCamelCase = do_lower_case
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple:
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 349
| 1
|
def lowercase_ ( A__ , A__ ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(A__ ) , A__ )
return number - int(A__ )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 137
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase_ ( A__ ) -> list[list[float]]:
"""simple docstring"""
snake_case = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(A__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
snake_case = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
snake_case = [[0.0, 0.0], [0.0, 0.0]]
snake_case , snake_case = matrix[1][1], matrix[0][0]
snake_case , snake_case = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(A__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(A__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
snake_case = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
snake_case = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
snake_case = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
snake_case = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
snake_case = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
snake_case = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
snake_case = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
snake_case = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
snake_case = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
snake_case = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
snake_case = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
snake_case = array(A__ )
for i in range(3 ):
for j in range(3 ):
snake_case = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
snake_case = array(A__ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(A__ )
# Calculate the inverse of the matrix
return [[float(d(A__ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 137
| 1
|
import math
def __UpperCamelCase ( _A , _A ):
return math.pow(_A , 2 ) - a
def __UpperCamelCase ( _A ):
return 2 * x
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = 2.0
while start <= a:
        lowerCAmelCase_ = math.pow(start , 2 )
return start
def __UpperCamelCase ( _A , _A = 9999 , _A = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ):
if a < 0:
raise ValueError('''math domain error''' )
lowerCAmelCase_ = get_initial_point(_A )
for _ in range(_A ):
lowerCAmelCase_ = value
lowerCAmelCase_ = value - fx(_A , _A ) / fx_derivative(_A )
if abs(prev_value - value ) < tolerance:
return value
return value
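# The loop above is Newton's method on f(x) = x**2 - a:
#   x_{n+1} = x_n - f(x_n) / f'(x_n) = x_n - (x_n**2 - a) / (2 * x_n)
# which converges quadratically to sqrt(a) once close; e.g. a = 16 settles at 4.0
# within `tolerance`.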
if __name__ == "__main__":
from doctest import testmod
testmod()
| 278
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__lowerCAmelCase = logging.get_logger(__name__)
def snake_case_ ( snake_case , snake_case ) -> List[str]:
lowercase__: List[str] = set()
lowercase__: List[Any] = []
def parse_line(snake_case ):
for line in fp:
if isinstance(snake_case , snake_case ):
lowercase__: Optional[Any] = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(snake_case ) > 0:
lowercase__: List[str] = '\n'.join(snake_case )
# Only keep the warnings specified in `targets`
if any(f': {x}: ' in warning for x in targets ):
selected_warnings.add(snake_case )
buffer.clear()
continue
else:
lowercase__: Union[str, Any] = line.strip()
buffer.append(snake_case )
if from_gh:
for filename in os.listdir(snake_case ):
lowercase__: Dict = os.path.join(snake_case , snake_case )
if not os.path.isdir(snake_case ):
# read the file
if filename != "warnings.txt":
continue
with open(snake_case ) as fp:
parse_line(snake_case )
else:
try:
with zipfile.ZipFile(snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case ):
# read the file
if filename != "warnings.txt":
continue
with z.open(snake_case ) as fp:
parse_line(snake_case )
except Exception:
logger.warning(
f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case_ ( snake_case , snake_case ) -> Any:
lowercase__: Optional[Any] = set()
lowercase__: int = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) )
return selected_warnings
if __name__ == "__main__":
def snake_case_ ( snake_case ) -> str:
return values.split(',' )
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__lowerCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__lowerCAmelCase = extract_warnings(args.output_dir, args.targets)
__lowerCAmelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
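# Example invocation (hypothetical run id and token; the flags come from the argparse
# definition above):
#   python extract_warnings.py --workflow_run_id 2418331046 \
#       --output_dir ./warnings --token $GH_TOKEN \
#       --targets DeprecationWarning,UserWarning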
| 196
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 369
|
from __future__ import annotations
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.append(_SCREAMING_SNAKE_CASE )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
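# e.g. n = 360 = 2**3 * 3**2 * 5 yields the factor list [2, 2, 2, 3, 3, 5].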
| 193
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case :List[Any] = logging.get_logger(__name__)
__snake_case :Dict = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = '''mobilenet_v2'''
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : int=224 , __SCREAMING_SNAKE_CASE : Optional[int]=1.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=8 , __SCREAMING_SNAKE_CASE : List[Any]=8 , __SCREAMING_SNAKE_CASE : int=6 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict="relu6" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=0.8 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : Dict=255 , **__SCREAMING_SNAKE_CASE : Optional[int] , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''')
__a = num_channels
__a = image_size
__a = depth_multiplier
__a = depth_divisible_by
__a = min_depth
__a = expand_ratio
__a = output_stride
__a = first_layer_is_expansion
__a = finegrained_output
__a = hidden_act
__a = tf_padding
__a = classifier_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = semantic_loss_ignore_index
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Any = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return OrderedDict([('''pixel_values''', {0: '''batch'''})])
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})])
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])
@property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return 1E-4
| 49
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def SCREAMING_SNAKE_CASE__ ( __A = 1_500_000 ) -> int:
    _snake_case = defaultdict(int )
_snake_case = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __A , 2 ):
if gcd(__A , __A ) > 1:
continue
_snake_case = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__A , limit + 1 , __A ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
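# Sketch of the identity the search above relies on (Euclid's formula): for
# m > n > 0 with opposite parity and gcd(m, n) == 1, the primitive triple
# (m*m - n*n, 2*m*n, m*m + n*n) has perimeter 2*m*(m + n).
if __name__ == "__main__":
    m, n = 2, 1
    a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
    assert (a, b, c) == (3, 4, 5) and a + b + c == 2 * m * (m + n)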
| 42
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
    batch_params = [
        '''prompt''',
        '''negative_prompt''',
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    required_optional_params = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_0( self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100
@property
    def dummy_tokenizer( self ):
        '''simple docstring'''
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
        '''simple docstring'''
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img( self ):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
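# Hedged sketch of what the fast test above exercises (names follow the test
# class; not a definitive invocation):
#   components = KandinskyImg2ImgPipelineFastTests().get_dummy_components()
#   pipe = KandinskyImg2ImgPipeline(**components)
#   out = pipe(prompt="horse", image=init_image, image_embeds=image_embeds,
#              negative_image_embeds=negative_image_embeds,
#              num_inference_steps=10, output_type="np")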
| 298
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs( word ):
    '''Return the set of adjacent symbol pairs in a word (a tuple of symbols).'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class PhobertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges = merges_handle.read().split("\n" )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = "@@ ".join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        '''simple docstring'''
        split_tokens = []
        words = re.findall(R"\S+\n?" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = " ".join(tokens ).replace("@@ " , "" ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file( self , f ):
        '''simple docstring'''
        if isinstance(f , str ):
            try:
                with open(f , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
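# A minimal sanity check for the BPE helper above: `get_pairs` returns the set
# of adjacent symbol bigrams that merge ranking operates over.
if __name__ == "__main__":
    assert get_pairs(("h", "e", "l", "l", "o</w>")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}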
| 298
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self , input_ids , token_type_ids , attention_mask ):
        """simple docstring"""
        return None
class FuncNonContiguousArgs:
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        """simple docstring"""
        return None
class OnnxExportTestCase( unittest.TestCase):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
    def test_export_tensorflow( self ):
        """simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''tf''' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_pytorch( self ):
        """simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''pt''' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_custom_bert_model( self ):
        """simple docstring"""
        from transformers import BertModel
        vocab = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
        with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
            vocab_file.write('''\n'''.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
                model.save_pretrained(bert_save_dir )
                self._test_export(bert_save_dir , '''pt''' , 12 , tokenizer )
@require_tf
@slow
    def test_quantize_tf( self ):
        """simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''tf''' , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
    def test_quantize_pytorch( self ):
        """simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''pt''' , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )
    def _test_export( self , model , framework , opset , tokenizer_class=None , **model_kwargs ):
        """simple docstring"""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('''model.onnx''' )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework , model , path , opset , tokenizer_class , **model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch( self ):
        """simple docstring"""
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        tokenizer = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(model , tokenizer , '''pt''' )
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf( self ):
        """simple docstring"""
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        tokenizer = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(model , tokenizer , '''tf''' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        """simple docstring"""
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
        self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
    def test_ensure_valid_input( self ):
        """simple docstring"""
        args = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
        tokens = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , args )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(args ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , args )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
        self.assertEqual(ordered_input_names[0] , '''input_ids''' )
    def test_generate_identified_filename( self ):
        """simple docstring"""
        generated = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
        self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
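# Hedged usage sketch for the helpers exercised above (the output path is
# hypothetical; the argument order mirrors the test's own call to `convert`):
#   convert("pt", "bert-base-cased", Path("/tmp/bert.onnx"), 12)
#   quantized = quantize(Path("/tmp/bert.onnx"))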
| 72
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=True , slots=True )
class _Item( Generic[KEY, VAL] ):
    key: KEY
    val: VAL
class _DeletedItem( _Item ):
    def __init__( self ):
        '''simple docstring'''
        super().__init__(key=None , val=None )
    def __bool__( self ):
        '''simple docstring'''
        return False
_deleted = _DeletedItem()
class HashMap( MutableMapping[KEY, VAL] ):
    def __init__( self , initial_block_size = 8 , capacity_factor = 0.75 ):
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key ):
        '''simple docstring'''
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind ):
        '''simple docstring'''
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind , key , val ):
        '''simple docstring'''
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ):
        '''simple docstring'''
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ):
        '''simple docstring'''
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size ):
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key ):
        '''simple docstring'''
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key , val ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key , val ):
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
    def __repr__( self ):
        '''simple docstring'''
        val_string = ' ,'.join(
            F'{item.key}: {item.val}' for item in self._buckets if item )
        return F'HashMap({val_string})'
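# A minimal usage sketch for the open-addressing HashMap above.
if __name__ == "__main__":
    hm = HashMap()
    hm["key_a"] = 3
    hm["key_b"] = 42
    assert hm["key_a"] == 3 and len(hm) == 2
    del hm["key_a"]
    assert "key_a" not in list(hm)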
| 316
| 0
|
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput( BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class UnCLIPScheduler( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , num_train_timesteps = 1000 , variance_type = "fixed_small_log" , clip_sample = True , clip_sample_range = 1.0 , prediction_type = "epsilon" , beta_schedule = "squaredcos_cap_v2" , ) -> None:
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self , num_inference_steps , device = None ) -> None:
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1E-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , model_output , timestep , sample , prev_timestep = None , generator=None , return_dict = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                ''' for the UnCLIPScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    ''' for the UnCLIPScheduler.''' )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
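# Hedged sketch of the denoising loop this scheduler plugs into (the `unet`
# model and `sample` tensor are hypothetical placeholders; pipelines may also
# pass an explicit `prev_timestep`):
#   scheduler = UnCLIPScheduler()
#   scheduler.set_timesteps(25)
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample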
| 359
|
'''simple docstring'''
def merge_sort( collection ):
    def merge(left , right ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__a: str = input("""Enter numbers separated by a comma:\n""").strip()
__a: Optional[int] = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
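# A quick sanity check for the merge sort above (runs only when the module is
# executed directly, after the interactive demo).
if __name__ == "__main__":
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert merge_sort([]) == []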
| 214
| 0
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content , 'html.parser')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'}):
        job_title = job.find('a' , attrs={'data-tn-element': 'jobTitle'}).text.strip()
        company_name = job.find('span' , {'class': 'company'}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
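# Hedged note: `fetch_jobs` is a generator; each iteration of the pattern below
# issues one HTTP GET against Indeed and yields (job_title, company_name) tuples.
#   for title, company in fetch_jobs("Pune"):
#       print(title, "-", company)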
| 137
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
                ''' scratch.'''
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
    train_data_files: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''The input training data files (multiple files in glob format). '''
                '''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
    eval_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
    line_by_line: bool = field(
        default=False , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
    mlm: bool = field(
        default=False , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
    whole_word_mask: bool = field(default=False , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
    mlm_probability: float = field(
        default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    plm_probability: float = field(
        default=1 / 6 , metadata={
            '''help''': (
                '''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
                ''' modeling.'''
            )
        } , )
    max_span_length: int = field(
        default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
    block_size: int = field(
        default=-1 , metadata={
            '''help''': (
                '''Optional input sequence length after tokenization.'''
                '''The training dataset will be truncated in block of this size for training.'''
                '''Default to the model max input length for single sentence inputs (take into account special tokens).'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def get_dataset(args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ):
    def _dataset(file_path , ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file , args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.')
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.')
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name')
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).')
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file , 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info(' %s = %s' , key , str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))
        results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
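# Hedged example invocation (the script and file names here are hypothetical;
# the flags correspond to the dataclass fields defined above):
#   python run_lm.py --model_name_or_path gpt2 --train_data_file train.txt \
#       --output_dir out --do_train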
| 137
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput( BaseOutput ):
    latent_dist: DiagonalGaussianDistribution
class AutoencoderKL( ModelMixin , ConfigMixin ):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 4 , norm_num_groups = 32 , sample_size = 32 , scaling_factor = 0.18215 , ):
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing( self , module , value=False ):
        '''simple docstring'''
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value
    def enable_tiling( self , use_tiling = True ):
        '''simple docstring'''
        self.use_tiling = use_tiling
    def disable_tiling( self ):
        '''simple docstring'''
        self.enable_tiling(False )
    def enable_slicing( self ):
        '''simple docstring'''
        self.use_slicing = True
    def disable_slicing( self ):
        '''simple docstring'''
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ):
        '''simple docstring'''
        processors = {}
        def fn_recursive_add_processors(name , module , processors ):
            if hasattr(module , "set_processor" ):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}" , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor ):
        '''simple docstring'''
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor )} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
        def fn_recursive_attn_processor(name , module , processor ):
            if hasattr(module , "set_processor" ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f"{name}.processor" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}" , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ):
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor() )
    @apply_forward_hook
    def encode( self , x , return_dict = True ):
        '''simple docstring'''
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode( self , z , return_dict = True ):
        '''simple docstring'''
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    @apply_forward_hook
    def decode( self , z , return_dict = True ):
        '''simple docstring'''
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v( self , a , b , blend_extent ):
        '''simple docstring'''
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h( self , a , b , blend_extent ):
        '''simple docstring'''
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode( self , x , return_dict = True ):
        '''simple docstring'''
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor )
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size ):
            row = []
            for j in range(0 , x.shape[3] , overlap_size ):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile )
                tile = self.quant_conv(tile )
                row.append(tile )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        moments = torch.cat(result_rows , dim=2 )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def tiled_decode( self , z , return_dict = True ):
        '''simple docstring'''
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor )
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size ):
            row = []
            for j in range(0 , z.shape[3] , overlap_size ):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile )
                decoded = self.decoder(tile )
                row.append(decoded )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        dec = torch.cat(result_rows , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward(self, sample, sample_posterior = False, return_dict = True, generator = None, ):
        '''simple docstring'''
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
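# A hypothetical usage sketch (assumes the diffusers AutoencoderKL API, where the
# tiled methods above originate; the checkpoint name is a real Hub id): enabling
# tiling routes encode/decode through the tiled paths above, so latents far larger
# than GPU memory would otherwise allow can be processed seam-free.
#
#   import torch
#   from diffusers import AutoencoderKL
#
#   vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#   vae.enable_tiling()
#   with torch.no_grad():
#       image = vae.decode(torch.randn(1, 4, 128, 128)).sample  # 1024x1024 output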
| 300
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt = None, choices = []):
        '''simple docstring'''
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
    def print_choice(self, index):
        '''simple docstring'''
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
    def move_direction(self, direction, num_spaces = 1):
        '''simple docstring'''
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
@input.mark(KEYMAP["up"])
    def move_up(self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
    def move_down(self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
    def select(self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
    def interrupt(self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        '''simple docstring'''
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
else:
return
else:
return
    def run(self, default_choice = 0):
        '''simple docstring'''
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
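# Hypothetical usage sketch (in accelerate this bullet menu drives the
# `accelerate config` CLI; `run` blocks until the user confirms with enter):
#
#   menu = BulletMenu("Which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"])
#   index = menu.run(default_choice=0)
#   print(f"Selected option index: {index}")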
| 300
| 1
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 25_60_47
RO_CODE = 25_61_45
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest ( TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer(self):
'''simple docstring'''
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        '''simple docstring'''
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
    def test_prepare_seq2seq_batch(self):
        '''simple docstring'''
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text , tgt_texts=tgt_text , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 10 )
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text , tgt_texts=tgt_text , max_length=3 , return_tensors="pt" )
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 3 )
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text , max_length=3 , max_target_length=10 , return_tensors="pt" )
                self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
                self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
                self.assertNotIn("decoder_input_ids" , batch_encoder_only )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
    def test_save_slow_from_fast_and_reload_fast(self):
'''simple docstring'''
pass
    def test_special_tokens_initialization(self):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                added_tokens = [AddedToken("<special>" , lstrip=True )]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , additional_special_tokens=added_tokens , **kwargs )
                r_output = tokenizer_r.encode("Hey this is a <special> token" )
                special_token_id = tokenizer_r.encode("<special>" , add_special_tokens=False )[0]
                self.assertTrue(special_token_id in r_output )
                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name , additional_special_tokens=added_tokens , **kwargs , from_slow=True , )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name , additional_special_tokens=added_tokens , **kwargs )
                    p_output = tokenizer_p.encode("Hey this is a <special> token" )
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token" )
                    self.assertEqual(p_output , r_output )
                    self.assertEqual(cr_output , r_output )
                    self.assertTrue(special_token_id in p_output )
                    self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest ( unittest.TestCase ):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
25_60_47,
1_62_97,
13_44_08,
81_65,
24_80_66,
1_47_34,
9_50,
11_35,
10_57_21,
35_73,
83,
2_73_52,
1_08,
4_94_86,
2,
]
@classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tokenizer : NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
        cls.pad_token_id = 1
return cls
    def test_language_codes(self):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_60_57 )
    def test_enro_tokenizer_batch_encode_plus(self):
'''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_enro_tokenizer_decode_ignores_language_codes(self):
'''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_enro_tokenizer_truncation(self):
        '''simple docstring'''
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(ids[0] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
    def test_mask_token(self):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_62_03, 3] )
    def test_special_tokens_unaffacted_by_save_load(self):
        '''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = NllbTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
'''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 15) , batch.input_ids.shape )
        self.assertEqual((2, 15) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(RO_CODE , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    def test_seq2seq_max_length(self):
        '''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation(self):
        '''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"input_ids": [[25_60_47, 70, 73_56, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_60_57,
} , )
@require_torch
    def test_legacy_behaviour(self):
        '''simple docstring'''
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
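# A minimal usage sketch against the Hub checkpoint exercised above. Per the
# legacy-behaviour test, legacy mode emits `tokens ... </s> src_lang_code` while
# the non-legacy mode emits `src_lang_code tokens ... </s>`:
#
#   from transformers import NllbTokenizer
#   tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn")
#   ids = tok("UN Chief says there is no military solution in Syria").input_ids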
| 59
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str , bert_config_file: str , pytorch_dump_path: str ) -> None:
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
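# Example invocation (the script file name and all paths are placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bert_model.ckpt \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin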
| 193
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector ) ->np.ndarray:
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
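    # ReLU broadcasts element-wise over any array shape (added sanity check):
    print(np.array(relu([[-2.5, 3.0], [0.0, -0.1]])))  # --> [[0. 3.] [0. 0.]]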
| 303
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( PretrainedConfig ):
    model_type = """xlm-roberta"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) ->Union[str, Any]:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class snake_case ( OnnxConfig ):
    @property
    def inputs( self) ->Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 303
| 1
|
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir: str , dest_dir: str , n: int ):
    """Write the first `n` lines of each file in `src_dir` into `dest_dir`."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open("w" ).write("\n".join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
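# Example invocation via python-fire (file and directory names are placeholders):
# keep the first 100 lines of every file in SRC_DIR and write them to DEST_DIR.
#
#   python minify.py /data/wmt_en_ro /data/wmt_en_ro_100 100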
| 298
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset ( datasets.BeamBasedBuilder ):
    """Dummy beam dataset."""
    def _info(self ) -> Tuple:
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )
    def _split_generators(self , dl_manager , pipeline ) -> Optional[int]:
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def _build_pcollection(self , pipeline , examples ) -> int:
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset ( datasets.BeamBasedBuilder ):
    """Dummy beam dataset with nested features."""
    def _info(self ) -> str:
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )
    def _split_generators(self , dl_manager , pipeline ) -> Union[str, Any]:
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def _build_pcollection(self , pipeline , examples ) -> List[str]:
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class BeamBuilderTest ( TestCase ):
'''simple docstring'''
@require_beam
    def test_download_and_prepare(self ) -> Union[str, Any]:
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
@require_beam
    def test_download_and_prepare_sharded(self ) -> Optional[Any]:
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00001-of-00002.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
@require_beam
    def test_no_beam_options(self ) -> str:
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
    def test_nested_features(self ) -> List[str]:
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
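# Usage note grounded in the tests above: a Beam-based builder refuses to run
# without an explicit runner (raising MissingBeamOptions); the in-process
# DirectRunner is enough for small datasets:
#
#   builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
#   builder.download_and_prepare()
#   dset = builder.as_dataset()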
| 298
| 1
|
import sys
from collections import defaultdict
class Heap:
    def __init__( self : str ) ->List[Any]:
        self.node_position = []

    def get_position( self : Optional[int] , vertex : List[Any] ) ->Any:
        return self.node_position[vertex]

    def set_position( self : Dict , vertex : Tuple , pos : List[str] ) ->Dict:
        self.node_position[vertex] = pos

    def top_to_bottom( self : Dict , heap : List[Any] , start : int , size : List[Any] , positions : Tuple ) ->List[Any]:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , smallest_child )
                self.top_to_bottom(heap , smallest_child , size , positions )

    def bottom_to_top( self : Optional[Any] , val : Tuple , index : Any , heap : str , position : Any ) ->Tuple:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )

    def heapify( self : List[str] , heap : List[Any] , positions : Tuple ) ->Optional[int]:
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )

    def delete_minimum( self : List[str] , heap : Dict , positions : Optional[int] ) ->Union[str, Any]:
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ) -> Tuple:
    """Prim's algorithm: return the edges of a minimum spanning tree."""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_A : Tuple = int(input('Enter number of edges: ').strip())
_A : Tuple = defaultdict(list)
for _ in range(edges_number):
_A : Optional[Any] = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
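# Quick non-interactive check of prisms_algorithm (adjacency lists hold
# [neighbor, weight] pairs, mirroring the input loop above):
#
#   example = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
#       example[u].append([v, w])
#       example[v].append([u, w])
#   print(prisms_algorithm(example))  # expected MST edges: [(0, 1), (1, 2), (2, 3)]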
| 265
|
def remove_digit( num: int ) -> int:
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(num , int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
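    # Worked example: deleting one digit from 132 yields 32, 12 or 13.
    print(remove_digit(132))  # 32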
| 265
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_snake_case : List[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample( wav: np.ndarray, max_length: float, sample_rate: int = 1_6000 ):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0, len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
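# e.g. a 5 s clip at 16 kHz (80_000 samples) with max_length=2.0 yields a random
# contiguous window of int(round(16_000 * 2.0)) = 32_000 samples.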
@dataclass
class DataTrainingArguments:
"""simple docstring"""
a_ = field(default=__snake_case , metadata={"""help""": """Name of a dataset from the datasets package"""} )
a_ = field(
default=__snake_case , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a_ = field(
default=__snake_case , metadata={"""help""": """A file containing the training audio paths and labels."""} )
a_ = field(
default=__snake_case , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
a_ = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
a_ = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
a_ = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
a_ = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
a_ = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a_ = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
a_ = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class ModelArguments:
"""simple docstring"""
a_ = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
a_ = field(
default=__snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ = field(
default=__snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
a_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a_ = field(
default=__snake_case , metadata={"""help""": """Name or path of preprocessor config."""} )
a_ = field(
default=__snake_case , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
a_ = field(
default=__snake_case , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
a_ = field(
default=__snake_case , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
a_ = field(
default=__snake_case , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
a_ = field(
default=__snake_case , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
    def __post_init__(self : int ) -> Optional[int]:
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder`'
                'instead. Setting `freeze_feature_encoder==True`.' , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_audio_classification', model_args, data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        """Randomly subsample each clip, then featurize the batch for training."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio['array'], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        """Featurize the full clips for evaluation (no subsampling)."""
        wavs = [audio['array'] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets['train'].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions, axis=1 )
        return metric.compute(predictions=predictions, references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels ), label2id=label2id, id2label=id2label, finetuning_task='audio-classification', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets['train'] = (
                raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets['eval'] = (
                raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False )
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets['train'] if training_args.do_train else None, eval_dataset=raw_datasets['eval'] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('train', train_result.metrics )
trainer.save_metrics('train', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics )
        trainer.save_metrics('eval', metrics )
# Write model card and (optionally) push to hub
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
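# Example invocation (mirrors the official audio-classification example; the
# dataset and model ids are real Hub identifiers, remaining flags map to the
# dataclass fields above):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb \
#       --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --learning_rate 3e-5 \
#       --max_length_seconds 1 \
#       --num_train_epochs 5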
| 284
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
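        # e.g. with the defaults above (image_size=4, patch_size=2, text_seq_length=7):
        # image_seq_length = (4 // 2) ** 2 + 1 = 5, so seq_length = 7 + 5 = 12.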
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
        config = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False)
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids , training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({'pixel_values': pixel_values} , training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name):
        return True
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v , tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size , dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32)
return inputs_dict
def snake_case_ ( self):
lowercase__ : Tuple = TFLayoutLMvaModelTester(self)
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , hidden_size=37)
def snake_case_ ( self):
self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
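# Illustrative only -- not part of the test suite above. A minimal sketch of driving
# the same checkpoint outside the test harness via the processor API; `apply_ocr=True`
# is the processor default and additionally requires pytesseract to be installed.
#
#   from transformers import AutoProcessor, TFLayoutLMv3Model
#   from PIL import Image
#
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base")
#   model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="tf")
#   outputs = model(**encoding)
#   print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)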
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
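# The loop exercised above reduces to a small amount of driver code. A minimal
# sketch, where ``unet`` is a hypothetical stand-in for any diffusers-style
# noise-prediction model (not part of these tests):
#
#   scheduler = EulerDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample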
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
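# A hypothetical invocation of this script. The flags come from the
# PretokenizationArguments used above; the script filename and repository ids
# are illustrative, not fixed by this file:
#
#   python pretokenizing.py \
#       --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean-train \
#       --tokenized_data_repo tokenized-codeparrot-train \
#       --num_workers 16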
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
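# As a usage note, a minimal (illustrative) feature-extraction sketch with the
# classes tested above; Flax models accept NumPy inputs directly:
#
#   from transformers import RobertaTokenizerFast
#
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   model = FlaxRobertaModel.from_pretrained("roberta-base")
#   inputs = tokenizer("Hello world", return_tensors="np")
#   outputs = model(**inputs)
#   print(outputs.last_hidden_state.shape)  # (1, 4, 768) for this input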
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)

        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
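# Note: LayoutLM expects each bounding box as (x0, y0, x1, y1) normalized to a
# 0-1000 grid relative to page size, which is why the batch above pads with
# [1000,1000,1000,1000]. A minimal sketch of that normalization (the helper name
# is illustrative and is not used by the tests):
def normalize_bbox(bbox, width, height):
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]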
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
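# The replacement import named in the deprecation message above is simply:
#
#   from diffusers import FlaxStableDiffusionControlNetPipeline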
import numpy as np
import datasets
UpperCAmelCase = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
UpperCAmelCase = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
UpperCAmelCase = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
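# A minimal translation sketch mirroring test_seq_to_seq_generation above, using this
# file's class names (upstream these correspond to the M2M100 model family):
#
#   tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
#   model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   batch = tokenizer("La vie est belle.", return_tensors="pt")
#   generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("en"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))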
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
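# A minimal sketch of calling the pipeline above; the checkpoint id is illustrative
# and not fixed by this file:
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
#   audio = output.audios[0]  # numpy array with shape (channels, samples)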
def method_a(boundary, steps):
    # "extended trapezoidal rule":
    # int(f) ~= h * (f(x0)/2 + f(x1) + ... + f(x_{n-1}) + f(xn)/2)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
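# Worked check for the defaults above: with f(x) = x**2 on [0, 1] and 10 steps,
# h = 0.1 and y = 0.1 * (f(0)/2 + f(0.1) + ... + f(0.9) + f(1)/2) = 0.335,
# against the exact integral 1/3; the trapezoidal error shrinks as O(h**2).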
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a non-negative integer using Brian Kernighan's way.
    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(37)
    3
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
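# Why ``number &= number - 1`` works: subtracting 1 flips the lowest set bit to 0 and
# every bit below it to 1, so each AND clears exactly one set bit per iteration.
# Worked example: 25 = 0b11001 -> 0b11000 -> 0b10000 -> 0, i.e. three iterations.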
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A , A , A , A ) -> int:
UpperCAmelCase : List[str] = [json.loads(A ) for l in open(A )]
UpperCAmelCase : List[Any] = os.path.dirname(A )
UpperCAmelCase : Optional[Any] = tokenizer
UpperCAmelCase : Tuple = labels
UpperCAmelCase : List[Any] = len(A )
UpperCAmelCase : Dict = max_seq_length
UpperCAmelCase : List[Any] = transforms
def __len__( self ) -> Optional[int]:
return len(self.data )
def __getitem__( self , A ) -> Any:
UpperCAmelCase : Optional[Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=A ) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = sentence[0], sentence[1:-1], sentence[-1]
UpperCAmelCase : List[Any] = sentence[: self.max_seq_length]
UpperCAmelCase : Optional[int] = torch.zeros(self.n_classes )
UpperCAmelCase : str = 1
UpperCAmelCase : List[Any] = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
UpperCAmelCase : Optional[int] = self.transforms(A )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def __lowerCamelCase ( _lowercase ) -> Dict:
UpperCAmelCase : Dict = [len(row["""sentence"""] ) for row in batch]
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = len(_lowercase ), max(_lowercase )
UpperCAmelCase : Dict = torch.zeros(_lowercase , _lowercase , dtype=torch.long )
UpperCAmelCase : str = torch.zeros(_lowercase , _lowercase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_lowercase , _lowercase ) ):
UpperCAmelCase : List[Any] = input_row["""sentence"""]
UpperCAmelCase : List[str] = 1
UpperCAmelCase : Union[str, Any] = torch.stack([row["""image"""] for row in batch] )
UpperCAmelCase : Any = torch.stack([row["""label"""] for row in batch] )
UpperCAmelCase : List[Any] = torch.stack([row["""image_start_token"""] for row in batch] )
UpperCAmelCase : Optional[int] = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def __lowerCamelCase ( ) -> Tuple:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def __lowerCamelCase ( ) -> List[str]:
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
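A wiring sketch for the pieces above (assumptions: a JSON-lines file with "text", "img" and "label" fields, and any HuggingFace tokenizer; the path and checkpoint are illustrative):
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
dataset = JsonlDataset("data/train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=128)
loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
text, mask, image, img_start, img_end, labels = next(iter(loader))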
| 265
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_base_case(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_passed_arguments(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
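A hedged sketch of how the decorator under test is used in a real training script (`make_dataloader` and `step` are hypothetical helpers, not part of accelerate):
from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    # On CUDA OOM the decorator halves batch_size and re-invokes this function
    # from scratch, so everything batch-size-dependent must be rebuilt here.
    loader = make_dataloader(batch_size)  # hypothetical helper
    for batch in loader:
        step(batch)  # hypothetical training step

train()  # note: called without the batch_size argument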
| 265
| 1
|
"""simple docstring"""
import os
import sys
import unittest
_a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a = os.path.join(git_repo_path, """src""", """diffusers""")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
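A small sketch that re-runs the behaviors the assertions above pin down (no new assumptions beyond the test expectations):
print(find_backend("    if not is_torch_available():"))  # -> "torch"
print(create_dummy_object("CONSTANT", "'torch'"))        # -> "\nCONSTANT = None\n"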
| 100
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0  # background
        array[array > 0] = 1   # labeled region
        return Image.fromarray((array * 255).astype(np.uint8))
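A usage sketch (an assumption on my part: `PipelineTool.__call__` chains encode/forward/decode, so the tool can be invoked directly; the image path is illustrative):
from PIL import Image

tool = ImageSegmentationTool()
image = Image.open("cat.png")  # illustrative path
mask = tool(image, "cat")      # returns a PIL image holding the binary mask
mask.save("cat_mask.png")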
| 100
| 1
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class lowercase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "geglu" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "layer_norm" , __SCREAMING_SNAKE_CASE = False , ) ->Tuple:
super().__init__()
lowerCAmelCase = only_cross_attention
lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCAmelCase = AdaLayerNorm(__UpperCamelCase , __UpperCamelCase )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase = AdaLayerNormZero(__UpperCamelCase , __UpperCamelCase )
else:
lowerCAmelCase = nn.LayerNorm(__UpperCamelCase , elementwise_affine=__UpperCamelCase )
lowerCAmelCase = Attention(
query_dim=__UpperCamelCase , heads=__UpperCamelCase , dim_head=__UpperCamelCase , dropout=__UpperCamelCase , bias=__UpperCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__UpperCamelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCAmelCase = (
AdaLayerNorm(__UpperCamelCase , __UpperCamelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(__UpperCamelCase , elementwise_affine=__UpperCamelCase )
)
lowerCAmelCase = Attention(
query_dim=__UpperCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__UpperCamelCase , dim_head=__UpperCamelCase , dropout=__UpperCamelCase , bias=__UpperCamelCase , upcast_attention=__UpperCamelCase , ) # is self-attn if encoder_hidden_states is none
else:
lowerCAmelCase = None
lowerCAmelCase = None
# 3. Feed-forward
lowerCAmelCase = nn.LayerNorm(__UpperCamelCase , elementwise_affine=__UpperCamelCase )
lowerCAmelCase = FeedForward(__UpperCamelCase , dropout=__UpperCamelCase , activation_fn=__UpperCamelCase , final_dropout=__UpperCamelCase )
# let chunk size default to None
lowerCAmelCase = None
lowerCAmelCase = 0
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
# Sets chunk feed-forward
lowerCAmelCase = chunk_size
lowerCAmelCase = dim
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) ->Dict:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
lowerCAmelCase = self.norma(__UpperCamelCase , __UpperCamelCase )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.norma(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hidden_dtype=hidden_states.dtype )
else:
lowerCAmelCase = self.norma(__UpperCamelCase )
lowerCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCAmelCase = self.attna(
__UpperCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__UpperCamelCase , **__UpperCamelCase , )
if self.use_ada_layer_norm_zero:
lowerCAmelCase = gate_msa.unsqueeze(1 ) * attn_output
lowerCAmelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCAmelCase = (
self.norma(__UpperCamelCase , __UpperCamelCase ) if self.use_ada_layer_norm else self.norma(__UpperCamelCase )
)
lowerCAmelCase = self.attna(
__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , attention_mask=__UpperCamelCase , **__UpperCamelCase , )
lowerCAmelCase = attn_output + hidden_states
# 3. Feed-forward
lowerCAmelCase = self.norma(__UpperCamelCase )
if self.use_ada_layer_norm_zero:
lowerCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
lowerCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCAmelCase = torch.cat(
[self.ff(__UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(__UpperCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCAmelCase = self.ff(__UpperCamelCase )
if self.use_ada_layer_norm_zero:
lowerCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output
lowerCAmelCase = ff_output + hidden_states
return hidden_states
class lowercase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 4 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = "geglu" , __SCREAMING_SNAKE_CASE = False , ) ->str:
super().__init__()
lowerCAmelCase = int(dim * mult )
lowerCAmelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCAmelCase = GELU(__UpperCamelCase , __UpperCamelCase )
if activation_fn == "gelu-approximate":
lowerCAmelCase = GELU(__UpperCamelCase , __UpperCamelCase , approximate='''tanh''' )
elif activation_fn == "geglu":
lowerCAmelCase = GEGLU(__UpperCamelCase , __UpperCamelCase )
elif activation_fn == "geglu-approximate":
lowerCAmelCase = ApproximateGELU(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase = nn.ModuleList([] )
# project in
self.net.append(__UpperCamelCase )
# project dropout
self.net.append(nn.Dropout(__UpperCamelCase ) )
# project out
self.net.append(nn.Linear(__UpperCamelCase , __UpperCamelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__UpperCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Dict:
for module in self.net:
lowerCAmelCase = module(__UpperCamelCase )
return hidden_states
class lowercase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "none" ) ->List[Any]:
super().__init__()
lowerCAmelCase = nn.Linear(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase = approximate
    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class lowercase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
super().__init__()
lowerCAmelCase = nn.Linear(__UpperCamelCase , dim_out * 2 )
    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class lowercase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->int:
super().__init__()
lowerCAmelCase = nn.Linear(__UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
lowerCAmelCase = self.proj(__UpperCamelCase )
return x * torch.sigmoid(1.7_0_2 * x )
class lowercase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
super().__init__()
lowerCAmelCase = nn.Embedding(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase = nn.SiLU()
lowerCAmelCase = nn.Linear(__UpperCamelCase , embedding_dim * 2 )
lowerCAmelCase = nn.LayerNorm(__UpperCamelCase , elementwise_affine=__UpperCamelCase )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->int:
lowerCAmelCase = self.linear(self.silu(self.emb(__UpperCamelCase ) ) )
lowerCAmelCase , lowerCAmelCase = torch.chunk(__UpperCamelCase , 2 )
lowerCAmelCase = self.norm(__UpperCamelCase ) * (1 + scale) + shift
return x
class lowercase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->List[str]:
super().__init__()
lowerCAmelCase = CombinedTimestepLabelEmbeddings(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase = nn.SiLU()
lowerCAmelCase = nn.Linear(__UpperCamelCase , 6 * embedding_dim , bias=__UpperCamelCase )
lowerCAmelCase = nn.LayerNorm(__UpperCamelCase , elementwise_affine=__UpperCamelCase , eps=1e-6 )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Any:
lowerCAmelCase = self.linear(self.silu(self.emb(__UpperCamelCase , __UpperCamelCase , hidden_dtype=__UpperCamelCase ) ) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = emb.chunk(6 , dim=1 )
lowerCAmelCase = self.norm(__UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowercase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1e-5 ) ->List[str]:
super().__init__()
lowerCAmelCase = num_groups
lowerCAmelCase = eps
if act_fn is None:
lowerCAmelCase = None
else:
lowerCAmelCase = get_activation(__UpperCamelCase )
lowerCAmelCase = nn.Linear(__UpperCamelCase , out_dim * 2 )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->List[str]:
if self.act:
lowerCAmelCase = self.act(__UpperCamelCase )
lowerCAmelCase = self.linear(__UpperCamelCase )
lowerCAmelCase = emb[:, :, None, None]
lowerCAmelCase , lowerCAmelCase = emb.chunk(2 , dim=1 )
lowerCAmelCase = F.group_norm(__UpperCamelCase , self.num_groups , eps=self.eps )
lowerCAmelCase = x * (1 + scale) + shift
return x
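A standalone numeric sketch of the adaptive-norm modulation pattern that recurs throughout this block (not the classes above; shapes are illustrative):
import torch

x = torch.randn(2, 16, 64)   # (batch, tokens, channels)
emb = torch.randn(2, 128)    # conditioning embedding
to_shift_scale = torch.nn.Linear(128, 64 * 2)  # predicts per-channel (shift, scale)
shift, scale = to_shift_scale(emb).chunk(2, dim=1)
norm = torch.nn.LayerNorm(64, elementwise_affine=False)  # parameter-free norm
out = norm(x) * (1 + scale[:, None, :]) + shift[:, None, :]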
| 338
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Dict = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """camembert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
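A quick usage sketch of the two restored classes (a sketch; any keyword accepted by the config can be overridden):
config = CamembertConfig(num_hidden_layers=6)
onnx_config = CamembertOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes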
| 260
| 0
|
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''
    model_input_names = ['input_features', 'attention_mask']

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # utterance-level cepstral mean and variance normalization over the real (unpadded) frames
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
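A minimal end-to-end sketch of the extractor above (the waveform is synthetic; instantiating the class requires torchaudio to be installed):
import numpy as np

extractor = Speech2TextFeatureExtractor()
waveform = np.random.randn(16_000).astype(np.float32)  # 1 second of fake mono audio
feats = extractor(waveform, sampling_rate=16_000, padding=True, return_tensors="np")
print(feats["input_features"].shape)  # (1, num_frames, 80 mel bins)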
| 367
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    '''simple docstring'''
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.""")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]), ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
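A usage sketch mirroring the documented DPR reader flow (checkpoint names are the public facebook/dpr ones referenced in the maps above):
from transformers import DPRReader

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is love?",
    titles="Haddaway",
    texts="'What Is Love' is a song recorded by the artist Haddaway",
    return_tensors="pt",
)
outputs = model(**encoded)
best_spans = tokenizer.decode_best_spans(encoded, outputs)
print(best_spans[0].text)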
| 119
| 0
|
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        return True
    def setUp( self ):
        """simple docstring"""
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
return inputs_dict
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
    def test_for_token_classification( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
    def test_for_question_answering( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
@slow
    def test_model_from_pretrained( self ):
"""simple docstring"""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
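# Usage sketch (not part of the original test suite): the slow test above drives the bare
# model with hand-built ids and boxes. In practice the same forward pass is usually prepared
# with LayoutLMv3Processor; the words/boxes below are illustrative assumptions, only the
# checkpoint name is taken from the test.
if __name__ == "__main__":
    from transformers import LayoutLMv3Model, LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
    demo_model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
    words = ["hello", "world"]  # hypothetical document words
    boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]  # one 0-1000 normalized xyxy box per word
    encoding = processor(prepare_img(), words, boxes=boxes, return_tensors="pt")
    demo_outputs = demo_model(**encoding)
    # sequence length = text tokens + image patches + 1 (CLS), hidden size 768
    print(demo_outputs.last_hidden_state.shape)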
| 267
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 256_047
RO_CODE = 256_145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer( self ):
"""simple docstring"""
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained( self ):
"""simple docstring"""
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
    def test_prepare_seq2seq_batch( self ):
"""simple docstring"""
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text , tgt_texts=tgt_text , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 10 )
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text , tgt_texts=tgt_text , max_length=3 , return_tensors="pt" )
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 3 )
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text , max_length=3 , max_target_length=10 , return_tensors="pt" )
                self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
                self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
                self.assertNotIn("decoder_input_ids" , batch_encoder_only )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
    def test_save_slow_from_fast_and_reload_fast( self ):
"""simple docstring"""
pass
    def test_special_tokens_initialization( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                added_tokens = [AddedToken("<special>" , lstrip=True )]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , additional_special_tokens=added_tokens , **kwargs )
                r_output = tokenizer_r.encode("Hey this is a <special> token" )
                special_token_id = tokenizer_r.encode("<special>" , add_special_tokens=False )[0]
                self.assertTrue(special_token_id in r_output )
                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name , additional_special_tokens=added_tokens , **kwargs , from_slow=True , )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name , additional_special_tokens=added_tokens , **kwargs )
                    p_output = tokenizer_p.encode("Hey this is a <special> token" )
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token" )
                    self.assertEqual(p_output , r_output )
                    self.assertEqual(cr_output , r_output )
                    self.assertTrue(special_token_id in p_output )
                    self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = "facebook/nllb-200-distilled-600M"
lowerCAmelCase__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
@classmethod
    def setUpClass( cls ):
        """simple docstring"""
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
        cls.pad_token_id = 1
return cls
    def test_language_codes( self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 256_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 256_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 256_057 )
    def test_enro_tokenizer_batch_encode_plus( self ):
        """simple docstring"""
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_enro_tokenizer_decode_ignores_language_codes( self ):
        """simple docstring"""
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [RO_CODE, 4_254, 98_068, 112_923, 39_072, 3_909, 713, 102_767, 26, 17_314, 35_642, 14_683, 33_118, 2_022, 66_987, 2, 256_047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_enro_tokenizer_truncation( self ):
        """simple docstring"""
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(ids[0] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
    def test_mask_token( self ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [256_203, 3] )
    def test_special_tokens_unaffected_by_save_load( self ):
        """simple docstring"""
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = NllbTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
    def test_enro_tokenizer_prepare_batch( self ):
"""simple docstring"""
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 15) , batch.input_ids.shape )
        self.assertEqual((2, 15) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(RO_CODE , batch.decoder_input_ids[0, 0] )  # first decoder token is the target language code
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    def test_seq2seq_max_length( self ):
        """simple docstring"""
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation( self ):
        """simple docstring"""
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"""input_ids""": [[256_047, 70, 7_356, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 256_057,
} , )
@require_torch
    def test_legacy_behaviour( self ):
        """simple docstring"""
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
        self.assertEqual(
            inputs.input_ids , [16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2, 256_047] )
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [256_047, 16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2] )
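# Usage sketch (assumption, not taken from the tests): end-to-end translation with the same
# checkpoint. src_lang selects the language-code prefix checked above, and
# forced_bos_token_id makes generation start with the target language code.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    demo_tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    demo_model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
    demo_inputs = demo_tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
    generated = demo_model.generate(
        **demo_inputs, forced_bos_token_id=demo_tokenizer.lang_code_to_id["ron_Latn"], max_length=40
    )
    print(demo_tokenizer.batch_decode(generated, skip_special_tokens=True)[0])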
| 267
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
"""simple docstring"""
return 3_2
@property
    def time_input_dim( self ):
"""simple docstring"""
return 3_2
@property
    def block_out_channels_a( self ):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
"""simple docstring"""
return 1_0_0
@property
    def dummy_unet( self ):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
"""simple docstring"""
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs( self, device, seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
            device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
    def test_kandinsky( self ):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests(unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_text2img( self ):
        """simple docstring"""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, generator=generator, num_inference_steps=1_0_0, output_type="np", )
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(image, expected_image )
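# Usage sketch (assumption, not part of the tests): the slow test above exercises the
# two-stage Kandinsky 2.2 flow. A minimal text-to-image run looks like this; fp16 on CUDA
# and the generation settings are illustrative assumptions, the checkpoints are the ones
# used in the test.
if __name__ == "__main__":
    demo_prior = KandinskyVaaPriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    demo_decoder = KandinskyVaaPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    ).to("cuda")
    emb, neg_emb = demo_prior("red cat, 4k photo").to_tuple()  # prior maps text to image embeddings
    demo_image = demo_decoder(
        image_embeds=emb, negative_image_embeds=neg_emb,
        height=512, width=512, num_inference_steps=50, output_type="pil",
    ).images[0]
    demo_image.save("red_cat.png")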
| 191
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_6,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_0_0_0,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
        config = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask ):
        """simple docstring"""
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False )
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids, training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({"pixel_values": pixel_values}, training=False )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels ):
        """simple docstring"""
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        """simple docstring"""
        return True
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ) -> dict:
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1 ), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v, tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32 )
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32 )
return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=3_7 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_loss_computation( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model, "hf_compute_loss", None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                input_ids = prepared_for_class.pop("input_ids" )
                loss = model(input_ids, **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                input_ids = prepared_for_class.pop("input_ids" )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -1_0_0
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels )
                        loss = model(input_ids, **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
    def test_for_token_classification( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels )
    def test_for_question_answering( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf" ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ), axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False )
        # verify the logits
        expected_shape = (1, 1_9_9, 7_6_8)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4 ) )
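# Usage sketch (assumption, not part of the test suite): the same toy forward pass as in
# the slow test above, runnable directly from this module. The shape comment is derived
# from the test: 2 text tokens + 196 image patches + 1 CLS token = 199 positions.
if __name__ == "__main__":
    demo_model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
    demo_input_ids = tf.constant([[1, 2]])
    demo_bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
    demo_pixel_values = LayoutLMvaImageProcessor(apply_ocr=False)(
        images=prepare_img(), return_tensors="tf"
    ).pixel_values
    demo_out = demo_model(input_ids=demo_input_ids, bbox=demo_bbox, pixel_values=demo_pixel_values)
    print(demo_out.last_hidden_state.shape)  # expected (1, 199, 768)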
| 191
| 1
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline(Pipeline ):
def __init__( self : Optional[Any] , **lowerCamelCase__ : Tuple ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x: -x[0] )
        ]
        return result
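# Usage sketch (assumption, not part of this module): the class above is normally reached
# through the pipeline() factory rather than instantiated directly; the CLIP checkpoint
# and labels below are illustrative.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    preds = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "dog"],
    )
    print(preds)  # list of {"score": ..., "label": ...}, highest score first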
| 234
|
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__( self , size ) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )]  # flag for lazy update
    def left( self , idx ) -> int:
        return idx * 2
    def right( self , idx ) -> int:
        return idx * 2 + 1
    def build( self , idx , left_element , right_element , a ) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update( self , idx , left_element , right_element , a , b , val ) -> bool:
        # assign val to every position in [a, b], pushing pending updates down lazily
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query( self , idx , left_element , right_element , a , b ) -> int | float:
        # max of the elements in positions [a, b]
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )
    def __str__( self ) -> str:
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
    size = 1_5
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 1_1))
    print(segt.query(1, 1, size, 7, 1_2))
    segt.update(1, 1, size, 1, 3, 1_1_1)
    print(segt.query(1, 1, size, 1, 1_5))
    segt.update(1, 1, size, 7, 8, 2_3_5)
    print(segt)
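# Expected output of the driver above (derived by hand, worth re-checking):
# 7 for max(A[4..6]), 14 for max(A[7..11]), 15 for max(A[7..12]); after the range
# assignment 1..3 -> 111 the global max is 111; the final print shows the array
# after both lazy range assignments:
# [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8]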
| 265
| 0
|
from torch import nn
def get_activation( act_fn ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'Unsupported activation function: {act_fn}' )
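# Minimal usage sketch: map a config string to a torch activation module. "swish" and
# "silu" intentionally share nn.SiLU, which is the same function under two names.
if __name__ == "__main__":
    print(get_activation("gelu"))  # GELU module
    print(get_activation("swish"))  # SiLU module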
| 97
|
UNIVERSAL_GAS_CONSTANT = 8.3144598
def rms_speed_of_molecule( temperature , molar_mass ):
'''simple docstring'''
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
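# The function implements v_rms = sqrt(3 * R * T / M) with R = 8.3144598 J/(mol*K),
# T the absolute temperature in kelvin and M the molar mass in kg/mol.
# Worked example for N2 (M = 0.028 kg/mol) at T = 300 K:
#   sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s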
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 0.028  # kg/mol for N2 (the function expects kg/mol, so 28 g/mol -> 0.028)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(F'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
| 97
| 1
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module ( module ):
    for param in module.parameters():
        param.requires_grad = False
def get_device ( ):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_pil ( img ):
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp ( ):
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
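# Hypothetical usage sketch for the helpers above (assumes torch is installed):
# layer = torch.nn.Linear(4, 2)
# freeze_module(layer)
# assert not any(p.requires_grad for p in layer.parameters())
# print(get_device(), get_timestamp())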
| 100
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
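# Note on the slicing in read_in_q_k_v above: the model's hidden size is 256, so the fused
# in_proj matrix has shape (3 * 256, 256); rows 0:256, 256:512 and 512:768 hold the
# query, key and value projections respectively.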
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1,
        bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]])
        expected_boxes = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]])
        expected_boxes = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
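# Example invocation (hypothetical script and output names):
# python convert_table_transformer_checkpoint.py \
#     --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#     --pytorch_dump_folder_path ./table-transformer-detection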
| 100
| 1
|
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
    from doctest import testmod

    testmod()
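    # Hand-checked examples for the function above:
    assert bin_to_octal("1111") == "17"  # pads to "001111" -> ["001", "111"] -> "17"
    assert bin_to_octal("101010101") == "525"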
| 352
|
import shutil
import tempfile
import unittest
from typing import List
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = """<s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 1_054 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
    def test_full_tokenizer( self ):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __snake_case  # alias the dict assigned on the line above (its original name is kept to avoid retyping it)
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
    def test_save_pretrained( self ):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list = [(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''
    checkpoint_name = """facebook/mbart-large-50-one-to-many-mmt"""
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
    @classmethod
    def setUpClass( cls ):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
        return cls
    def test_language_codes( self ):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_XX"""] , 250_004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250_038 )
    def test_tokenizer_batch_encode_plus( self ):
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_tokenizer_decode_ignores_language_codes( self ):
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_tokenizer_truncation( self ):
        src_text = ["""this is gunna be a long sentence """ * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[0] , EN_CODE )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(ids ) , desired_max_length )
    def test_mask_token( self ):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_053, 250_001] )
    def test_special_tokens_unaffected_by_save_load( self ):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
    @require_torch
    def test_batch_fairseq_parity( self ):
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch( self ):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(2 , batch.decoder_input_ids[0, 0] )  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    def test_seq2seq_max_target_length( self ):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
    @require_torch
    def test_tokenizer_translation( self ):
        inputs = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
        self.assertEqual(
            nested_simplify(inputs ) , {
                # en_XX, A, test, EOS
                """input_ids""": [[250_004, 62, 3_034, 2]],
                """attention_mask""": [[1, 1, 1, 1]],
                # ar_AR
                """forced_bos_token_id""": 250_001,
            } , )
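# Hypothetical invocation for the suites above (the test-file path is an assumption):
#   python -m pytest tests/models/mbart50/test_tokenization_mbart50.py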
| 293
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig ( PretrainedConfig ):
    model_type = "data2vec-text"
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
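# Usage sketch for the configs above (assumes transformers is installed):
# config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4)  # a small hypothetical variant
# print(config.model_type)  # -> "data2vec-text"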
| 92
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config ( model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img ( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor ( model_name ):
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys ( original_param_names ):
    block_names = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
UpperCamelCase : Union[str, Any] = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]
    # classifier head entries; the TF-side names are reconstructed here (assumed from the Keras EfficientNet naming)
    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def replace_params ( hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
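# Layout note for replace_params above: TF stores conv kernels as (H, W, in, out) while
# PyTorch expects (out, in, H, W), hence permute(3, 2, 0, 1); depthwise kernels are
# (H, W, channels, multiplier) and become (channels, multiplier, H, W); dense kernels only transpose.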
@torch.no_grad()
def convert_efficientnet_checkpoint ( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights='imagenet' , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation='softmax' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='pt' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        model_name = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
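# Example invocation (hypothetical script and output names):
# python convert_efficientnet_checkpoint.py --model_name b0 --pytorch_dump_folder_path ./hf_model --save_model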
| 119
| 0
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BlenderbotTokenizer ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size( self ) -> int:
        return len(self.encoder )
    def get_vocab( self ) -> dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = " ".join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
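# Minimal usage sketch (hypothetical local files; assumes a RoBERTa-style vocab.json/merges.txt pair on disk):
# tok = BlenderbotTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
# print(tok.tokenize(" Hello world"))  # byte-level BPE tokens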
| 353
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester ( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : List[str] = FlaxBertModelTester(self )
@slow
def _lowerCAmelCase ( self ) -> Dict:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
snake_case_ : int = FlaxBertModel.from_pretrained("bert-base-cased" )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
"""simple docstring"""
import inspect
import unittest
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
__SCREAMING_SNAKE_CASE :int = inspect.getmembers(SCREAMING_SNAKE_CASE__ ,inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__SCREAMING_SNAKE_CASE :Optional[int] = '''k-diffusion'''
elif backend == "invisible_watermark":
__SCREAMING_SNAKE_CASE :int = '''invisible-watermark'''
assert backend in deps, f'''{backend} is not in the deps table!'''
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : List[str] = '''vivit'''
def __init__( self ,SCREAMING_SNAKE_CASE__=2_24 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=[2, 16, 16] ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu_fast" ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-06 ,SCREAMING_SNAKE_CASE__=True ,**SCREAMING_SNAKE_CASE__ ,) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = hidden_size
__SCREAMING_SNAKE_CASE :List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE :Union[str, Any] = intermediate_size
__SCREAMING_SNAKE_CASE :Any = hidden_act
__SCREAMING_SNAKE_CASE :Optional[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :str = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :Any = initializer_range
__SCREAMING_SNAKE_CASE :Optional[int] = layer_norm_eps
__SCREAMING_SNAKE_CASE :Optional[int] = image_size
__SCREAMING_SNAKE_CASE :List[str] = num_frames
__SCREAMING_SNAKE_CASE :Any = tubelet_size
__SCREAMING_SNAKE_CASE :str = num_channels
__SCREAMING_SNAKE_CASE :Any = qkv_bias
super().__init__(**SCREAMING_SNAKE_CASE__ )
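
# A minimal usage sketch (illustrative, not from the original file): override any
# subset of the defaults and the rest keep the values above.
#   config = VivitConfig(num_frames=16, hidden_dropout_prob=0.1)
#   assert config.tubelet_size == [2, 16, 16]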
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
class _UpperCAmelCase( __a ):
"""simple docstring"""
lowercase__ = 'upernet'
def __init__( self , __a=None , __a=5_12 , __a=0.02 , __a=[1, 2, 3, 6] , __a=True , __a=0.4 , __a=3_84 , __a=2_56 , __a=1 , __a=False , __a=2_55 , **__a , ) -> str:
'''simple docstring'''
super().__init__(**UpperCamelCase__)
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''])
elif isinstance(UpperCamelCase__ , UpperCamelCase__):
_UpperCamelCase = backbone_config.get('''model_type''')
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(UpperCamelCase__)
_UpperCamelCase = backbone_config
_UpperCamelCase = hidden_size
_UpperCamelCase = initializer_range
_UpperCamelCase = pool_scales
_UpperCamelCase = use_auxiliary_head
_UpperCamelCase = auxiliary_loss_weight
_UpperCamelCase = auxiliary_in_channels
_UpperCamelCase = auxiliary_channels
_UpperCamelCase = auxiliary_num_convs
_UpperCamelCase = auxiliary_concat_input
_UpperCamelCase = loss_ignore_index
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__)
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_a = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[int]:
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowerCamelCase__ ( __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = _TestCommandArgs(dataset=__snake_case, all_configs=__snake_case, save_infos=__snake_case )
_UpperCamelCase = TestCommand(*__snake_case )
test_command.run()
_UpperCamelCase = os.path.join(__snake_case, '''README.md''' )
assert os.path.exists(__snake_case )
_UpperCamelCase = DatasetInfosDict.from_directory(__snake_case )
_UpperCamelCase = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ), splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_35_15_63,
'''num_examples''': 1_00_00,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_84_18,
'''num_examples''': 10_00,
},
], download_size=3_94_06_80, dataset_size=2_58_99_81, )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
_UpperCamelCase , _UpperCamelCase = getattr(dataset_infos['''default'''], __snake_case ), getattr(expected_dataset_infos['''default'''], __snake_case )
if key == "num_bytes":
assert is_apercent_close(__snake_case, __snake_case )
elif key == "splits":
assert list(__snake_case ) == list(__snake_case )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes )
else:
result == expected
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
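# For example (assuming this file is saved as `local_sgd.py`), a typical launch is:
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2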
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation DataLoaders for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # Each "--flag value" pair should be converted to its natural Python type.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__UpperCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def __A ( __lowerCamelCase=None ) -> Union[str, Any]:
if subparsers is not None:
a = subparsers.add_parser("""tpu-config""" , description=_description )
else:
a = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
a = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=__lowerCamelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=__lowerCamelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
a = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=__lowerCamelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=__lowerCamelCase )
return parser
def __A ( __lowerCamelCase ) -> Union[str, Any]:
a = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__lowerCamelCase ):
a = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
a = defaults.command_file
if not args.command and defaults.commands is not None:
a = defaults.commands
if not args.tpu_name:
a = defaults.tpu_name
if not args.tpu_zone:
a = defaults.tpu_zone
if args.accelerate_version == "dev":
a = """git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
a = """accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , __lowerCamelCase ):
a = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
a = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __lowerCamelCase ):
a = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
a = ["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
a = """; """.join(__lowerCamelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
a = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(__lowerCamelCase )}' )
return
subprocess.run(__lowerCamelCase )
print("""Successfully setup pod.""" )
def __A ( ) -> Dict:
a = tpu_command_parser()
a = parser.parse_args()
tpu_command_launcher(__lowerCamelCase )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
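
# Known result for the default n=2 (Project Euler 33): the four non-trivial
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98; their product
# reduces to 1/100, so solution() prints 100.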
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__A = Lock()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCAmelCase__ :Any = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCAmelCase__ :Tuple = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCAmelCase__ :Optional[int] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCAmelCase__ :Optional[int] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ :str = []
lowerCAmelCase__ :Optional[Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCAmelCase__ :List[str] = Pipe()
lowerCAmelCase__ :List[Any] = Pipe()
process_array_.append(
Process(
target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCAmelCase__ :Dict = temp_rs
lowerCAmelCase__ :Optional[Any] = temp_rr
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
lowerCAmelCase__ :Union[str, Any] = Pipe()
lowerCAmelCase__ :List[str] = Pipe()
process_array_.append(
Process(
target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCAmelCase__ :Union[str, Any] = temp_rs
lowerCAmelCase__ :Any = temp_rr
process_array_.append(
Process(
target=_SCREAMING_SNAKE_CASE , args=(
len(_SCREAMING_SNAKE_CASE ) - 1,
arr[len(_SCREAMING_SNAKE_CASE ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ :str = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __A () ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Union[str, Any] = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = odd_even_transposition(_SCREAMING_SNAKE_CASE )
print('Sorted List\n' )
print(*_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "WhisperFeatureExtractor"
__UpperCamelCase = "WhisperTokenizer"
def __init__( self : List[str] , lowercase_ : List[Any] , lowercase_ : Dict):
'''simple docstring'''
super().__init__(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.feature_extractor
SCREAMING_SNAKE_CASE_ : Dict = False
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=True):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=lowercase_ , language=lowercase_ , no_timestamps=lowercase_)
def __call__( self : Dict , *lowercase_ : Dict , **lowercase_ : Union[str, Any]):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''audio''' , lowercase_)
SCREAMING_SNAKE_CASE_ : str = kwargs.pop('''sampling_rate''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('''text''' , lowercase_)
if len(lowercase_) > 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = args[0]
SCREAMING_SNAKE_CASE_ : Tuple = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
if audio is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
if text is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer(lowercase_ , **lowercase_)
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = encodings['''input_ids''']
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[Any]):
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str , lowercase_ : Optional[Any]="np"):
'''simple docstring'''
return self.tokenizer.get_prompt_ids(lowercase_ , return_tensors=lowercase_)
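
# A minimal usage sketch (assumes a 16 kHz mono waveform array named `waveform`;
# loading the checkpoint requires network access):
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")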
from __future__ import annotations

import bisect


def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
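
# Illustrative check (not part of the original script): binary_search([0, 5, 7, 10, 15], 15)
# returns 4, and binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4) returns the same index.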
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    # Disable gradient tracking for every parameter of the given module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
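
# A minimal usage sketch (assumes a torch.nn.Module instance named `model`):
#   device = get_device()
#   model = model.to(device)
#   freeze_params(model)  # turn off gradient updates for every parameter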
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all character n-grams of length `ngram_size` from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 11
__SCREAMING_SNAKE_CASE = int("""1""" + """0""" * digit_len )
for num in range(UpperCamelCase_ , UpperCamelCase_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(UpperCamelCase_ , UpperCamelCase_ ):
solutions.append(f"{num}/{den}" )
den += 1
num += 1
__SCREAMING_SNAKE_CASE = 10
return solutions
def _lowerCAmelCase ( UpperCamelCase_ = 2 ):
__SCREAMING_SNAKE_CASE = 1.0
for fraction in fraction_list(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = Fraction(UpperCamelCase_ )
result *= frac.denominator / frac.numerator
return int(UpperCamelCase_ )
if __name__ == "__main__":
print(solution())
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
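
# Worked example: comparing 2^10 with 3^7 gives res(2, 10) = 10*log10(2) ≈ 3.01 and
# res(3, 7) = 7*log10(3) ≈ 3.34, so the script reports 3^7 (= 2187) as the larger power.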
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__SCREAMING_SNAKE_CASE : Optional[Any] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def _a ( _SCREAMING_SNAKE_CASE=None ) -> Dict:
if subparsers is not None:
snake_case_ = subparsers.add_parser("""tpu-config""" , description=_description )
else:
snake_case_ = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
snake_case_ = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=_SCREAMING_SNAKE_CASE , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=_SCREAMING_SNAKE_CASE , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
snake_case_ = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=_SCREAMING_SNAKE_CASE , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ):
snake_case_ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
snake_case_ = defaults.command_file
if not args.command and defaults.commands is not None:
snake_case_ = defaults.commands
if not args.tpu_name:
snake_case_ = defaults.tpu_name
if not args.tpu_zone:
snake_case_ = defaults.tpu_zone
if args.accelerate_version == "dev":
snake_case_ = """git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
snake_case_ = """accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , _SCREAMING_SNAKE_CASE ):
snake_case_ = f"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
snake_case_ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _SCREAMING_SNAKE_CASE ):
snake_case_ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
snake_case_ = ["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [f"""pip install {args.accelerate_version}"""]
new_cmd += args.command
snake_case_ = """; """.join(_SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
snake_case_ = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"""Running {" ".join(_SCREAMING_SNAKE_CASE )}""" )
return
subprocess.run(_SCREAMING_SNAKE_CASE )
print("""Successfully setup pod.""" )
def _a ( ) -> List[Any]:
snake_case_ = tpu_command_parser()
snake_case_ = parser.parse_args()
tpu_command_launcher(_SCREAMING_SNAKE_CASE )
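# Usage sketch (added for illustration; the flag values are assumptions, not
# taken from the file above). An invocation such as
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug
# would, because of --debug, print rather than run roughly this command:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; pip install accelerate -U; python train.py" --worker all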
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# The concrete tokenizer class names were stripped from this dump; each of the
# placeholders below guards one sentencepiece-backed class via DummyObject.
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
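# Behavior sketch (added): instantiating any placeholder without sentencepiece
# installed fails fast through `requires_backends`, which raises with a message
# pointing the user to install the missing backend, e.g. `pip install sentencepiece`.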
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal value to its binary equivalent, returned as an int of 0/1 digits."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    if not bin_str:  # the input was zero
        return 0
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
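# Usage sketch (added for illustration; the return value packs binary digits
# into a decimal int, matching the function above):
#   hex_to_bin("AC")  -> 10101100   (0xAC == 172 == 0b10101100)
#   hex_to_bin("-5")  -> -101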
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
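# Usage sketch (added): with the names above restored, this module runs as a
# standard transformers test file, e.g.
#   python -m pytest tests/models/mra/test_modeling_mra.py -k "test_model"
# (the path follows the usual transformers test layout and is assumed here).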
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    # 7 * 9! + 1 is an upper bound: an 8-digit number can never equal the sum
    # of the factorials of its digits.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F'''{solution() = }''')
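# Worked example (added): sum_of_digit_factorial(145) == 1! + 4! + 5! == 145,
# so 145 is counted by solution(); for this Project Euler 34-style problem the
# expected total is 40730 (145 + 40585), though that figure is stated from memory.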
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
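# Usage sketch (added for illustration):
#   logical_left_shift(1, 2)       -> "0b100"    (1 << 2 == 4)
#   logical_right_shift(8, 2)      -> "0b10"     (8 >> 2 == 2)
#   arithmetic_right_shift(-8, 1)  -> "0b11100"  (the sign bit is replicated)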
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # Sometimes we load weights saved with the "scale" name instead of "weight".
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
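# Usage sketch (added; `pt_model` and `flax_model` are assumed stand-ins for a
# matching PyTorch/Flax model pair with the same architecture):
#   flax_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
# The result is a nested Flax parameter dict, with linear weights transposed
# and 4-D conv kernels permuted from (out, in, kh, kw) to (kh, kw, in, out).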
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def final():
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
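# Usage sketch (added; the script filename is assumed, not stated in the file):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name "cvtmodels/CvT-w24-384x384-IN-22k.pth" \
#       --pytorch_dump_folder_path ./cvt-w24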
from ..utils import DummyObject, requires_backends
# The original class name was stripped from this dump; the body follows the
# standard DummyObject template (from_config/from_pretrained are assumed
# from that template).
class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
"""simple docstring"""
def binomial_coefficient(n: int, r: int) -> int:
    # Build one row of Pascal's triangle in place: c[j] accumulates C(i, j).
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
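# Worked example (added): binomial_coefficient(10, 5) fills the row
# c = [1, 10, 45, 120, 210, 252] via the recurrence C(i, j) = C(i-1, j-1) + C(i-1, j)
# and returns c[5] == 252, i.e. C(10, 5).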
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
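# Note (added): the `num_attention_heads / 2` in the attention-shape checks
# above reflects ConvBERT's head_ratio=2 design, in which half of the
# self-attention heads are replaced by span-based dynamic convolutions.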
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = '''openai-gpt'''
A : Dict = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self, A=40_478, A=512, A=768, A=12, A=12, A="gelu", A=0.1, A=0.1, A=0.1, A=1E-5, A=0.02, A="cls_index", A=True, A=None, A=True, A=0.1, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = n_positions
SCREAMING_SNAKE_CASE : Tuple = n_embd
SCREAMING_SNAKE_CASE : Any = n_layer
SCREAMING_SNAKE_CASE : Tuple = n_head
SCREAMING_SNAKE_CASE : Union[str, Any] = afn
SCREAMING_SNAKE_CASE : List[str] = resid_pdrop
SCREAMING_SNAKE_CASE : List[str] = embd_pdrop
SCREAMING_SNAKE_CASE : Tuple = attn_pdrop
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = summary_type
SCREAMING_SNAKE_CASE : int = summary_use_proj
SCREAMING_SNAKE_CASE : Union[str, Any] = summary_activation
SCREAMING_SNAKE_CASE : List[str] = summary_first_dropout
SCREAMING_SNAKE_CASE : Any = summary_proj_to_labels
        super().__init__(**kwargs )
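# Minimal usage sketch (assuming the upstream class name OpenAIGPTConfig for the
# renamed class above):
#   config = OpenAIGPTConfig(n_layer=6, n_head=8)
#   config.num_hidden_layers  # -> 6, resolved through the attribute_map above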
| 246
| 0
|
import math
def res( x , y ):
    if 0 not in (x, y):
        # We use the identity log10(x^y) = y * log10(x), so powers can be compared
        # through their base-10 logarithms without computing the powers themselves.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen' )
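# Illustrative sanity check of the identity above: comparing y*log10(x) values
# compares x**y values without ever forming the huge powers.
assert math.isclose(res(2, 100), 100 * math.log10(2))
assert res(2, 100) > res(3, 60)  # i.e. 2**100 > 3**60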
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_SCREAMING_SNAKE_CASE = """Enter the base and the power separated by a comma: """
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = map(int, input(prompt).split(""","""))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
_SCREAMING_SNAKE_CASE = res(xa, ya)
_SCREAMING_SNAKE_CASE = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 327
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_SCREAMING_SNAKE_CASE = get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ):
os.makedirs(__a , exist_ok=__a )
with FSDP.state_dict_type(
__a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case_ : Dict = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
snake_case_ : Dict = os.path.join(__a , __a )
if accelerator.process_index == 0:
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(__a , __a )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ : Dict = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
snake_case_ : Dict = os.path.join(__a , __a )
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(__a , __a )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ : Optional[int] = os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" )
os.makedirs(__a , exist_ok=__a )
logger.info(f"""Saving model to {ckpt_dir}""" )
snake_case_ : int = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=__a , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , )
logger.info(f"""Model saved to {ckpt_dir}""" )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__a ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
snake_case_ : Optional[Any] = os.path.join(__a , __a )
logger.info(f"""Loading model from {input_model_file}""" )
snake_case_ : Optional[Any] = torch.load(__a )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ : Optional[Any] = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
snake_case_ : Tuple = os.path.join(__a , __a )
logger.info(f"""Loading model from {input_model_file}""" )
snake_case_ : Optional[int] = torch.load(__a )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ : Tuple = (
os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" )
if f"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading model from {ckpt_dir}""" )
snake_case_ : List[Any] = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__a , storage_reader=dist_cp.FileSystemReader(__a ) , planner=DefaultLoadPlanner() , )
snake_case_ : Any = state_dict['model']
logger.info(f"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(__a )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ):
os.makedirs(__a , exist_ok=__a )
with FSDP.state_dict_type(
__a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case_ : List[str] = FSDP.optim_state_dict(__a , __a )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
snake_case_ : str = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
snake_case_ : Any = os.path.join(__a , __a )
logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(__a , __a )
logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
else:
snake_case_ : Optional[int] = os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(__a , exist_ok=__a )
logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , )
logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ : Optional[Any] = None
            # below check should work but currently it isn't working (mostly a PyTorch issue);
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
snake_case_ : Union[str, Any] = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
snake_case_ : List[Any] = os.path.join(__a , __a )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
snake_case_ : Optional[int] = torch.load(__a )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
snake_case_ : str = (
os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
snake_case_ : Any = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__a ) , )
snake_case_ : Optional[int] = optim_state['optimizer']
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
snake_case_ : Optional[Any] = FSDP.optim_state_dict_to_load(__a , __a , __a )
optimizer.load_state_dict(__a )
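# Illustrative call order (a sketch; upstream accelerate names these helpers
# save_fsdp_model / save_fsdp_optimizer and load_fsdp_model / load_fsdp_optimizer):
# a checkpoint saves the model state first and the optimizer state second, and a
# resume mirrors that order; the `model_index` / `optimizer_index` arguments
# distinguish multiple wrapped models or optimizers within the same run.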
| 327
| 1
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''owlvit_text_model'''
def __init__( self , _UpperCAmelCase=4_9408 , _UpperCAmelCase=512 , _UpperCAmelCase=2048 , _UpperCAmelCase=12 , _UpperCAmelCase=8 , _UpperCAmelCase=16 , _UpperCAmelCase="quick_gelu" , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , _UpperCAmelCase=0 , _UpperCAmelCase=4_9406 , _UpperCAmelCase=4_9407 , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase)
__A : Optional[int] = vocab_size
__A : List[str] = hidden_size
__A : Any = intermediate_size
__A : str = num_hidden_layers
__A : int = num_attention_heads
__A : Union[str, Any] = max_position_embeddings
__A : Optional[int] = hidden_act
__A : Optional[int] = layer_norm_eps
__A : List[str] = attention_dropout
__A : int = initializer_range
__A : List[Any] = initializer_factor
@classmethod
def SCREAMING_SNAKE_CASE ( cls , _UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase)
__A : Dict = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase)
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type') == "owlvit":
__A : Dict = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase)
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''owlvit_vision_model'''
def __init__( self , _UpperCAmelCase=768 , _UpperCAmelCase=3072 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3 , _UpperCAmelCase=768 , _UpperCAmelCase=32 , _UpperCAmelCase="quick_gelu" , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase)
__A : List[Any] = hidden_size
__A : Optional[int] = intermediate_size
__A : int = num_hidden_layers
__A : List[str] = num_attention_heads
__A : Dict = num_channels
__A : Dict = image_size
__A : Dict = patch_size
__A : Tuple = hidden_act
__A : int = layer_norm_eps
__A : Optional[int] = attention_dropout
__A : int = initializer_range
__A : List[Any] = initializer_factor
@classmethod
def SCREAMING_SNAKE_CASE ( cls , _UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase)
__A : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase)
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type') == "owlvit":
__A : Optional[int] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase)
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''owlvit'''
lowerCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=512 , _UpperCAmelCase=2.6592 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase)
if text_config is None:
__A : str = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.')
if vision_config is None:
__A : int = {}
            logger.info('vision_config is None. Initializing the OwlViTVisionConfig with default values.')
__A : Dict = OwlViTTextConfig(**_UpperCAmelCase)
__A : List[str] = OwlViTVisionConfig(**_UpperCAmelCase)
__A : Dict = projection_dim
__A : Optional[int] = logit_scale_init_value
__A : Optional[Any] = return_dict
__A : str = 1.0
@classmethod
def SCREAMING_SNAKE_CASE ( cls , _UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase)
__A : int = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase)
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase)
@classmethod
def SCREAMING_SNAKE_CASE ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
__A : int = {}
__A : str = text_config
__A : Any = vision_config
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = copy.deepcopy(self.__dict__)
__A : List[Any] = self.text_config.to_dict()
__A : Tuple = self.vision_config.to_dict()
__A : int = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE (a__ ):
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
])
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
])
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return 1e-4
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = None , ):
'''simple docstring'''
__A : List[str] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , framework=_UpperCAmelCase)
__A : Union[str, Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=_UpperCAmelCase , framework=_UpperCAmelCase)
return {**text_input_dict, **image_input_dict}
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return 14
| 355
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase = None):
'''simple docstring'''
__A : str = value
__A : Node | None = None # Added in order to delete a node easier
__A : Node | None = None
__A : Node | None = None
def __repr__( self):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value)
return pformat({F'{self.value}': (self.left, self.right)} , indent=1)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase = None):
'''simple docstring'''
__A : int = root
def __str__( self):
'''simple docstring'''
return str(self.root)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if new_children is not None: # reset its kids
__A : Optional[int] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_UpperCAmelCase): # If it is the right children
__A : int = new_children
else:
__A : Union[str, Any] = new_children
else:
__A : Tuple = new_children
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.root is None
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = Node(_UpperCAmelCase) # create a new Node
if self.empty(): # if Tree is empty
__A : Union[str, Any] = new_node # set its root
else: # Tree is not empty
__A : Dict = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
__A : Tuple = new_node # We insert the new node in a leaf
break
else:
__A : str = parent_node.left
else:
if parent_node.right is None:
__A : List[str] = new_node
break
else:
__A : int = parent_node.right
__A : int = parent_node
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase):
'''simple docstring'''
for value in values:
self.__insert(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if self.empty():
            raise IndexError('Warning: Tree is empty! Please use another tree.')
else:
__A : Any = self.root
# use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value != value:
__A : List[Any] = node.left if value < node.value else node.right
return node
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase = None):
'''simple docstring'''
if node is None:
if self.root is None:
return None
__A : str = self.root
if not self.empty():
while node.right is not None:
__A : Dict = node.right
return node
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase = None):
'''simple docstring'''
if node is None:
__A : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
__A : Optional[int] = self.root
while node.left is not None:
__A : Tuple = node.left
return node
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = self.search(_UpperCAmelCase) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_UpperCAmelCase , _UpperCAmelCase)
elif node.left is None: # Has only right children
self.__reassign_nodes(_UpperCAmelCase , node.right)
elif node.right is None: # Has only left children
self.__reassign_nodes(_UpperCAmelCase , node.left)
else:
__A : str = self.get_max(
node.left) # Gets the max value of the left branch
self.remove(tmp_node.value) # type: ignore
__A : List[Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left)
yield from self.preorder_traverse(node.right)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=None):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root)
else:
return traversal_function(self.root)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if node:
self.inorder(_UpperCAmelCase , node.left)
arr.append(node.value)
self.inorder(_UpperCAmelCase , node.right)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : list[int] = []
self.inorder(_UpperCAmelCase , _UpperCAmelCase) # append all values to list using inorder traversal
return arr[k - 1]
def _lowerCAmelCase ( __snake_case : Node | None ) -> list[Node]:
__A : Tuple = []
if curr_node is not None:
__A : Optional[Any] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _lowerCAmelCase ( ) -> None:
__A : Optional[int] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
__A : str = BinarySearchTree()
for i in testlist:
t.insert(__snake_case )
# Prints all the elements of the list in order traversal
print(__snake_case )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(__snake_case )
print(__snake_case )
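# Reference behaviour for the kth-smallest method above (an illustrative sketch
# with a hypothetical helper, not part of the class):
def _kth_smallest_reference(values, k):
    # In-order traversal of a BST yields ascending order, so the kth smallest
    # equals the (k-1)-indexed element of the sorted values.
    return sorted(values)[k - 1]
assert _kth_smallest_reference((8, 3, 6, 1, 10, 14, 13, 4, 7), 3) == 4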
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 190
| 0
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A_ ):
"""simple docstring"""
def __init__( self : Any , *_A : int , **_A : str ):
"""simple docstring"""
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 303
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCAmelCase_ = {
'facebook/xglm-564M': 2_048,
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None:
"""simple docstring"""
lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase__ : Any = 7
lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
lowercase__ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[int] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase__ : List[str] = len(self.sp_model )
lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_snake_case )
lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
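        # e.g. with fairseq_offset == 1, spm id 3 ("," in the table above) becomes
        # fairseq id 4, while ids 0-3 are served from fairseq_tokens_to_ids directly.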
def __getstate__( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = self.__dict__.copy()
lowercase__ : Optional[int] = None
lowercase__ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : int = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : Dict = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase__ : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case ))
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case ))
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_snake_case ,out_type=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip()
return out_string
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Any = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,'''wb''' ) as fi:
lowercase__ : Dict = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
| 16
| 0
|
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def a__ ( __lowercase , __lowercase , __lowercase ) -> Any:
# Initialise PyTorch model
_A = AlbertConfig.from_json_file(__lowercase )
print(f"""Building PyTorch model from configuration: {config}""" )
_A = AlbertForPreTraining(__lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(__lowercase , __lowercase , __lowercase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 361
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = GPTSanJapaneseTokenizer
__UpperCamelCase = False
__UpperCamelCase = {'do_clean_text': False, 'add_prefix_space': False}
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
_A = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
_A = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
_A = {"unk_token": "<unk>"}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(a__ ) )
def a_ ( self : str , **a__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : Dict , a__ : str ) -> int:
'''simple docstring'''
_A = "こんにちは、世界。 \nこんばんは、㔺界。😀"
_A = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def a_ ( self : Union[str, Any] , a__ : Optional[int] ) -> Any:
'''simple docstring'''
_A , _A = self.get_input_output_texts(a__ )
_A = tokenizer.encode(a__ , add_special_tokens=a__ )
_A = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
return text, ids
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
pass # TODO add if relevant
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
pass # TODO add if relevant
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass # TODO add if relevant
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = self.get_tokenizer()
# Testing tokenization
_A = "こんにちは、世界。 こんばんは、㔺界。"
_A = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
_A = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_A = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_A = tokens + [tokenizer.unk_token]
_A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_A = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(a__ , a__ )
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
_A = self.get_tokenizer()
# Testing tokenization
_A = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
_A = "こんにちは、、、、世界。こんばんは、、、、世界。"
_A = tokenizer.encode(a__ )
_A = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
@slow
def a_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
_A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_A = "こんにちは、世界。"
_A = "こんばんは、㔺界。😀"
_A = "こんにちは、世界。こんばんは、世界。😀"
_A = tokenizer.encode(prefix_text + input_text )
_A = tokenizer.encode("" , prefix_text=prefix_text + input_text )
_A = tokenizer.encode(a__ , prefix_text=a__ )
_A = tokenizer.decode(a__ )
_A = tokenizer.decode(a__ )
_A = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , a__ )
@slow
def a_ ( self : str ) -> str:
'''simple docstring'''
_A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_A = "こんにちは、世界。"
_A = "こんばんは、㔺界。😀"
_A = len(tokenizer.encode(a__ ) ) - 2
_A = len(tokenizer.encode(a__ ) ) - 2
_A = [1] + [0] * (len_prefix + len_text + 1)
_A = [1] * (len_prefix + len_text + 1) + [0]
_A = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_A = tokenizer(prefix_text + input_text ).token_type_ids
_A = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
_A = tokenizer(a__ , prefix_text=a__ ).token_type_ids
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
@slow
def a_ ( self : int ) -> Dict:
'''simple docstring'''
_A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_A = tokenizer.encode("あンいワ" )
_A = tokenizer.encode("" , prefix_text="あンいワ" )
_A = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(a__ ) , tokenizer.decode(a__ ) )
self.assertEqual(tokenizer.decode(a__ ) , tokenizer.decode(a__ ) )
self.assertNotEqual(a__ , a__ )
self.assertNotEqual(a__ , a__ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_A = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
_A = tokenizer(a__ , padding=a__ )
_A = tokenizer.batch_encode_plus(a__ , padding=a__ )
# fmt: off
_A = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
_A = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_A = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , a__ )
self.assertListEqual(x_token.token_type_ids , a__ )
self.assertListEqual(x_token.attention_mask , a__ )
self.assertListEqual(x_token_a.input_ids , a__ )
self.assertListEqual(x_token_a.token_type_ids , a__ )
self.assertListEqual(x_token_a.attention_mask , a__ )
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
| 163
| 0
|
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Union[str, Any] = R'''\w+[.]\d+'''
A : Optional[int] = re.findall(snake_case__ , snake_case__ )
for pat in pats:
A : List[Any] = key.replace(snake_case__ , '''_'''.join(pat.split('''.''' ) ) )
return key
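# Effect of the regex rewrite above on a hypothetical key (illustration only):
#   "down_blocks.0.attentions.1.proj.weight" -> "down_blocks_0.attentions_1.proj.weight"
# every "<name>.<digit>" span is joined with an underscore so the key matches
# Flax's flattened module naming.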
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[Any] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A : Tuple = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A : int = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A : Dict = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
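    # (PyTorch stores conv kernels as (out_ch, in_ch, h, w); Flax expects
    # (h, w, in_ch, out_ch), hence the (2, 3, 1, 0) transpose just above.)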
# linear layer
A : List[str] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
A : List[str] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A : List[str] = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A : Any = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=42 ):
'''simple docstring'''
    # Step 1: Convert the PyTorch tensors to numpy arrays
    A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
A : Optional[int] = flax_model.init_weights(PRNGKey(snake_case__ ) )
A : Optional[Any] = flatten_dict(snake_case__ )
A : int = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : Any = rename_key(snake_case__ )
A : Dict = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
A, A : Optional[Any] = rename_key_and_reshape_tensor(snake_case__ , snake_case__ , snake_case__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# also add unexpected weight so that warning is thrown
A : Union[str, Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
| 3
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ )
A : Dict = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
A : Dict = dataset_size < in_memory_max_size
else:
A : Tuple = False
A : int = is_small_dataset(snake_case__ )
assert result == expected
| 3
| 1
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCAmelCase__ ( A_ ):
__a = """umt5"""
__a = ["""past_key_values"""]
def __init__( self : str , _lowerCamelCase : Optional[Any]=250112 , _lowerCamelCase : List[Any]=512 , _lowerCamelCase : int=64 , _lowerCamelCase : Dict=1024 , _lowerCamelCase : List[Any]=8 , _lowerCamelCase : int=None , _lowerCamelCase : str=6 , _lowerCamelCase : Union[str, Any]=32 , _lowerCamelCase : Union[str, Any]=128 , _lowerCamelCase : str=0.1 , _lowerCamelCase : Tuple=1e-6 , _lowerCamelCase : List[Any]=1.0 , _lowerCamelCase : List[str]="gated-gelu" , _lowerCamelCase : Any=True , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Tuple="T5Tokenizer" , _lowerCamelCase : List[str]=True , _lowerCamelCase : Tuple=0 , _lowerCamelCase : List[str]=1 , _lowerCamelCase : Optional[Any]=0 , **_lowerCamelCase : Dict , ):
super().__init__(
is_encoder_decoder=_lowerCamelCase , tokenizer_class=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , pad_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
_snake_case = vocab_size
_snake_case = d_model
_snake_case = d_kv
_snake_case = d_ff
_snake_case = num_layers
_snake_case = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_snake_case = num_heads
_snake_case = relative_attention_num_buckets
_snake_case = relative_attention_max_distance
_snake_case = dropout_rate
_snake_case = layer_norm_epsilon
_snake_case = initializer_factor
_snake_case = feed_forward_proj
_snake_case = use_cache
_snake_case = self.feed_forward_proj.split('''-''' )
_snake_case = act_info[-1]
_snake_case = act_info[0] == '''gated'''
if len(_lowerCamelCase ) > 1 and act_info[0] != "gated" or len(_lowerCamelCase ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
_snake_case = '''gelu_new'''
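        # e.g. feed_forward_proj="gated-gelu" parses to activation "gelu" (then
        # remapped to "gelu_new") with a gated feed-forward block, while a plain
        # "relu" parses to an ungated ReLU feed-forward.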
@property
def lowercase ( self : Dict ):
return self.d_model
@property
def lowercase ( self : Union[str, Any] ):
return self.num_heads
@property
def lowercase ( self : Tuple ):
return self.num_layers
class lowerCAmelCase__ ( A_ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowercase ( self : Union[str, Any] ):
_snake_case = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
_snake_case = '''past_encoder_sequence + sequence'''
_snake_case = {0: '''batch'''}
_snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
_snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowercase ( self : Optional[Any] ):
return 13
@property
def lowercase ( self : Optional[Any] ):
return 5e-4
| 356
|
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(nums: list[int], max_sum: int, num_index: int, path: list[int], result: list[list[int]], remaining_nums_sum: int, ) -> None:
    # Prune this branch if the current path already overshoots max_sum, or if even
    # taking every remaining number could no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums) ):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index], )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
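# With the inputs above, the script prints the two subsets of [3, 34, 4, 12, 5, 2]
# that sum to 9, discovered in index order: [3, 4, 2] [4, 5]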
| 40
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase :List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase :int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = state_dict.pop(_lowerCAmelCase )
__magic_name__ : Union[str, Any] = val
def lowerCamelCase ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__magic_name__ : Tuple = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
__magic_name__ : Any = value
else:
__magic_name__ : List[Any] = value
return new_state_dict
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=False ):
"""simple docstring"""
__magic_name__ : int = """"""
if is_panoptic:
__magic_name__ : str = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__magic_name__ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
__magic_name__ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : Any = in_proj_weight[:256, :]
__magic_name__ : Tuple = in_proj_bias[:256]
__magic_name__ : Optional[int] = in_proj_weight[256:512, :]
__magic_name__ : str = in_proj_bias[256:512]
__magic_name__ : int = in_proj_weight[-256:, :]
__magic_name__ : List[Any] = in_proj_bias[-256:]
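# The fused in_proj parameters sliced above have shapes (768, 256) and (768,):
# rows 0-255 form the query projection, rows 256-511 the key projection, and the
# last 256 rows the value projection, for this model's hidden size of 256.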
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__magic_name__ : Dict = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights to our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
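For reference, a hypothetical command line for this script (the script filename and output path are placeholders, not part of the original file; running it needs network access for torch.hub and the HF Hub):

# python convert_conditional_detr_checkpoint.py \
#     --model_name conditional_detr_resnet50 \
#     --pytorch_dump_folder_path ./conditional_detr_resnet50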
| 331
|
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase ( _lowerCAmelCase : Any, _lowerCAmelCase : List[str], _lowerCAmelCase : Dict ) -> str:
_UpperCAmelCase : Union[str, Any] = OmegaConf.load(_lowerCAmelCase )
_UpperCAmelCase : str = torch.load(_lowerCAmelCase, map_location="""cpu""" )["""model"""]
_UpperCAmelCase : Dict = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : List[str] = """first_stage_model."""
for key in keys:
if key.startswith(_lowerCAmelCase ):
_UpperCAmelCase : Dict = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase : str = {}
_UpperCAmelCase : Tuple = """model.diffusion_model."""
for key in keys:
if key.startswith(_lowerCAmelCase ):
_UpperCAmelCase : Tuple = state_dict[key]
_UpperCAmelCase : Optional[Any] = config.model.params.first_stage_config.params
_UpperCAmelCase : Optional[Any] = config.model.params.unet_config.params
_UpperCAmelCase : List[str] = VQModel(**_lowerCAmelCase ).eval()
vqvae.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase : List[Any] = UNetLDMModel(**_lowerCAmelCase ).eval()
unet.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = DDIMScheduler(
timesteps=config.model.params.timesteps, beta_schedule="""scaled_linear""", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=_lowerCAmelCase, )
_UpperCAmelCase : Tuple = LDMPipeline(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
pipeline.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCamelCase__ : List[str] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
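A hypothetical invocation of this converter (all paths below are placeholders chosen for illustration):

# python convert_ldm_uncond.py \
#     --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm_pipeline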
| 246
| 0
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A vertex may take `color` only if none of its already-colored neighbours uses it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` and all vertices after it."""
    # Base case: every vertex received a valid color
    if index == len(graph):
        return True
    # Recursive step: try each color for the current vertex
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
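A quick usage sketch of the entry point above (this driver is illustrative, not part of the original module); because the loop tries colors in ascending order, the result on a triangle graph is deterministic:

# adjacency matrix of a triangle: every vertex neighbours the other two
triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
print(color(triangle, 3))  # -> [0, 1, 2]: each vertex takes the first legal color
print(color(triangle, 2))  # -> []: a triangle is not 2-colorable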
| 360
|
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
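One caveat in the script above: the MinMaxScaler is created inline, so its fitted state is discarded and the predictions stay in scaled units. A minimal sketch (variable names here are illustrative, not from the original) of keeping the scaler around to map predictions back to the original price scale:

scaler = MinMaxScaler()
actual_data = scaler.fit_transform(actual_data)
# ... train as above, then undo the scaling column-wise:
pred = model.predict(x_test)                                   # shape (n_windows, forward_days)
pred_prices = scaler.inverse_transform(pred.reshape(-1, 1)).reshape(pred.shape)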
| 55
|
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 190
| 0
|
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width after resizing to the shortest edge,
        capping the longest edge, and rounding down to a multiple of size_divisor."""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 282
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """Generate outputs for input_path, save them to save_path, and score them against reference_path."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 282
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCAmelCase ( a__ ):
lowercase = 42
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , a__ , a__ ):
lowercase = 32
lowercase = 4
lowercase = 4
lowercase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
lowercase = False
lowercase = (320, 640, 1_280, 1_280)
lowercase = 2
lowercase = 8
lowercase = None
lowercase = 1_280
lowercase = 0.0
lowercase = False
lowercase = jnp.floataa
lowercase = True
lowercase = 0
lowercase = False
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
__UpperCamelCase = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
__UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa )
__UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__UpperCamelCase = jax.random.split(_lowerCamelCase )
__UpperCamelCase = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.block_out_channels
__UpperCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__UpperCamelCase = self.num_attention_heads or self.attention_head_dim
# input
__UpperCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__UpperCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__UpperCamelCase = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
__UpperCamelCase = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
__UpperCamelCase = []
__UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__UpperCamelCase = output_channel
__UpperCamelCase = block_out_channels[i]
__UpperCamelCase = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__UpperCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__UpperCamelCase = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
__UpperCamelCase = down_blocks
# mid
__UpperCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__UpperCamelCase = []
__UpperCamelCase = list(reversed(_lowerCamelCase ) )
__UpperCamelCase = list(reversed(_lowerCamelCase ) )
__UpperCamelCase = list(reversed(_lowerCamelCase ) )
__UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__UpperCamelCase = output_channel
__UpperCamelCase = reversed_block_out_channels[i]
__UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
__UpperCamelCase = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__UpperCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__UpperCamelCase = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
__UpperCamelCase = output_channel
__UpperCamelCase = up_blocks
# out
__UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase = True , __UpperCAmelCase = False , ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , jnp.ndarray ):
__UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__UpperCamelCase = timesteps.astype(dtype=jnp.floataa )
__UpperCamelCase = jnp.expand_dims(_lowerCamelCase , 0 )
__UpperCamelCase = self.time_proj(_lowerCamelCase )
__UpperCamelCase = self.time_embedding(_lowerCamelCase )
# 2. pre-process
__UpperCamelCase = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
__UpperCamelCase = self.conv_in(_lowerCamelCase )
# 3. down
__UpperCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
__UpperCamelCase = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__UpperCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__UpperCamelCase = new_down_block_res_samples
# 4. mid
__UpperCamelCase = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
__UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
__UpperCamelCase = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
__UpperCamelCase = self.conv_norm_out(_lowerCamelCase )
__UpperCamelCase = nn.silu(_lowerCamelCase )
__UpperCamelCase = self.conv_out(_lowerCamelCase )
__UpperCamelCase = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
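A minimal initialization sketch (assumes the Flax API defined above; note this allocates a full-size UNet, so it is slow on CPU):

rng = jax.random.PRNGKey(0)
unet = FlaxUNet2DConditionModel(sample_size=32)
params = unet.init_weights(rng)  # returns the FrozenDict of model parameters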
| 316
|
"""Project Euler problem 86: find the least cuboid size M such that the number of
cuboids with integer shortest surface paths, and with M as the longest side, exceeds the limit."""
from math import sqrt


def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 163
| 0
|
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 335
|
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 335
| 1
|
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 336
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : int = """dpr"""
def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ):
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a : List[Any] = vocab_size
a : Optional[Any] = hidden_size
a : Union[str, Any] = num_hidden_layers
a : Dict = num_attention_heads
a : int = hidden_act
a : Any = intermediate_size
a : Any = hidden_dropout_prob
a : Dict = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Union[str, Any] = type_vocab_size
a : Optional[Any] = initializer_range
a : Dict = layer_norm_eps
a : int = projection_dim
a : str = position_embedding_type
| 40
| 0
|
import unittest

from knapsack import knapsack as k


class TestClass(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
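The knapsack module under test is not included in this snippet; a minimal 0/1 knapsack recursion consistent with the calls above (the signature and names are assumptions inferred from the tests, not the original module) would be:

def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    # Base case: no items left, or no capacity left
    if counter == 0 or capacity == 0:
        return 0
    # If the nth item is too heavy, it cannot be part of the optimal solution
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the nth item
    left_capacity = capacity - weights[counter - 1]
    included = values[counter - 1] + knapsack(left_capacity, weights, values, counter - 1)
    excluded = knapsack(capacity, weights, values, counter - 1)
    return max(included, excluded)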
| 196
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
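# Size-divisor contract sketch (illustrative, not part of the test suite):
# GLPNImageProcessor rounds each spatial dimension down to a multiple of
# `size_divisor`, so every output shape is divisible by it.
#
#     processor = GLPNImageProcessor(do_resize=True, size_divisor=32)
#     image = Image.fromarray(np.zeros((45, 67, 3), dtype=np.uint8))
#     pixel_values = processor(image, return_tensors="pt").pixel_values  # (1, 3, 32, 64)
#     assert pixel_values.shape[-1] % 32 == 0 and pixel_values.shape[-2] % 32 == 0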
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Args:
        sortish_sampler (`bool`, *optional*, defaults to `False`):
            Whether to use a *sortish sampler* or not.
        predict_with_generate (`bool`, *optional*, defaults to `False`):
            Whether to use generate to calculate generative metrics (ROUGE, BLEU).
    """

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance while replacing any `GenerationConfig` attribute by its dict representation, so the
        whole object can be serialized to JSON.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
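# Usage sketch (illustrative): enable generation-based evaluation by toggling
# `predict_with_generate` and bounding the decode.
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )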
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    """Returns the logger instance used by this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """A proxy returned by :meth:`BaseFileLock.acquire`, so the lock can be used in a ``with`` statement."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value, in seconds."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent: actually acquire the lock file."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent: actually release the lock file."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True if the object currently holds the file lock."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock or fails with a :exc:`Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Releases the file lock. The lock is only fully released when the lock counter reaches 0."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


# Pick the strongest lock implementation available on this platform.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encodes the given word into Baconian cipher text."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decodes Baconian cipher text back into the original message."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
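# Usage sketch (illustrative): every letter becomes a five-character A/B group,
# e.g. "hello" -> AABBB AABAA ABABA ABABA ABBAB, concatenated per word.
#
#     assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
#     assert decode("AABBBAABAAABABAABABAABBAB") == "hello"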
if __name__ == "__main__":
from doctest import testmod
testmod()
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
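# Usage note (illustrative): importing and instantiating the old class still
# works but emits a FutureWarning; new code should construct the image
# processor directly.
#
#     processor = ChineseCLIPImageProcessor()    # preferred
#     extractor = ChineseCLIPFeatureExtractor()  # deprecated alias, warns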
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
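# Worked example (illustrative): for values [60, 100, 120], weights [10, 20, 30]
# and capacity 50, items are ranked by value/weight ratio (6 > 5 > 4). The two
# best items fit whole (value 160, weight 30); the remaining capacity of 20
# takes 20/30 of the third item, adding 120 * 20 / 30 = 80, for a total of 240.0.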
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
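# Illustrative note: with the toy vocab above, WordPiece greedily matches the
# longest vocabulary prefix, so "UNwant\u00E9d,running" lowercases and splits into
# ["un", "##want", "##ed", ",", "runn", "##ing"], whose ids [7, 4, 5, 10, 8, 9]
# are simply the tokens' positions in the vocab file.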
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`XLMModel`] or a [`TFXLMModel`]. It is used to
    instantiate a XLM model according to the specified arguments, defining the model architecture.
    """

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
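# Usage sketch (illustrative): default construction and the ONNX input axes.
#
#     config = XLMConfig()  # emb_dim=2048, n_layers=12, n_heads=16, ...
#     onnx_config = XLMOnnxConfig(config)
#     list(onnx_config.inputs)  # ["input_ids", "attention_mask", "token_type_ids"]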
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())
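# Example (illustrative): only words longer than four characters are reversed,
# so reverse_long_words("abcd efghi") == "abcd ihgfe".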
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])