| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY

if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder-decoder models; they don't just append to the incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
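For reference, a minimal standalone sketch of the `text2text-generation` pipeline these tests exercise (assuming `transformers` with a PyTorch backend; the tiny checkpoint name and the expected empty output are taken from the tests above):

```python
from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")

# do_sample=False (greedy/beam decoding) keeps the output reproducible.
print(generator("Something there", do_sample=False))
# -> [{'generated_text': ''}]  (the randomly initialized tiny model emits empty text)

# Beam search can return several candidates per input.
print(generator("Something there", num_return_sequences=3, num_beams=3))
```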
| 4
|
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY

if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder-decoder models; they don't just append to the incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 88
| 0
|
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUs (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16)."
        " Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location of where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
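The tracker-specific calls in the script reduce to a small pattern. A minimal sketch, assuming `accelerate` is installed and at least one tracker (e.g. TensorBoard) is available in the environment:

```python
from accelerate import Accelerator

accelerator = Accelerator(log_with="all", project_dir="logs")
config = {"lr": 2e-5, "num_epochs": 3}
accelerator.init_trackers("my_run", config)  # start a run and store the hyperparameters

for epoch in range(config["num_epochs"]):
    train_loss = 0.123  # placeholder for a real metric
    accelerator.log({"train_loss": train_loss, "epoch": epoch}, step=epoch)

accelerator.end_training()  # flush and close all open trackers
```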
| 356
|
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        # Fill every destination pixel with its nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
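A tiny self-contained check of the index mapping above, upscaling a 2x2 "image" to 4x4 (assumes the `NearestNeighbour` class from the block above is in scope):

```python
import numpy as np

src = np.array(
    [[[0, 0, 0], [255, 255, 255]],
     [[255, 0, 0], [0, 0, 255]]],
    dtype=np.uint8,
)
resizer = NearestNeighbour(src, 4, 4)
resizer.process()
# ratio_x = ratio_y = 0.5, so destination indices 0,1 map to source 0 and 2,3 map to source 1:
# each source pixel now covers a 2x2 block of the 4x4 output.
print(resizer.output.shape)  # (4, 4, 3)
```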
| 160
| 0
|
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
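The class above is an instance of a common deprecation-shim pattern: subclass the replacement, warn on construction, delegate everything else. A self-contained sketch with hypothetical names:

```python
import warnings


class NewProcessor:  # the maintained implementation
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewProcessor):  # the deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldFeatureExtractor(size=128)
assert any(issubclass(w.category, FutureWarning) for w in caught)
```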
| 97
|
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
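A quick sanity check of the function above (the first pair is the classic example with three mismatching positions):

```python
assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("00000", "11111") == 5
assert hamming_distance("same", "same") == 0
```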
| 97
| 1
|
deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
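A sketch of how such a pin table is typically consumed, e.g. to assemble install requirements in a `setup.py` (the `deps_list` helper here is written out as an assumption, mirroring the kind of helper that usually accompanies this table):

```python
def deps_list(*pkgs):
    # Look up the pinned specifier for each package name.
    return [deps[pkg] for pkg in pkgs]


install_requires = deps_list("numpy", "regex", "requests", "Pillow")
print(install_requires)  # ['numpy', 'regex!=2019.12.17', 'requests', 'Pillow']
```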
| 371
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = '''▁'''
__UpperCamelCase : Union[str, Any] = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
__UpperCamelCase : Optional[Any] = {
'''google/reformer-crime-and-punishment''': 5_2_4_2_8_8,
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : List[Any] ,lowercase_ : List[str] ,lowercase_ : Optional[int]="</s>" ,lowercase_ : List[Any]="<unk>" ,lowercase_ : Optional[Any]=[] ,lowercase_ : Optional[Dict[str, Any]] = None ,**lowercase_ : int ,):
lowerCAmelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ ,unk_token=lowercase_ ,additional_special_tokens=lowercase_ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase_ ,)
lowerCAmelCase__ : List[str] = vocab_file
lowerCAmelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@property
def __lowerCAmelCase ( self : List[str] ):
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
lowerCAmelCase__ : str = self.__dict__.copy()
lowerCAmelCase__ : Any = None
return state
def __setstate__( self : List[str] ,lowercase_ : Any ):
lowerCAmelCase__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowerCAmelCase__ : Tuple = {}
lowerCAmelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Dict ,lowercase_ : str ):
return self.sp_model.encode(lowercase_ ,out_type=lowercase_ )
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : int ):
return self.sp_model.piece_to_id(lowercase_ )
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Dict ):
if index < self.sp_model.get_piece_size():
lowerCAmelCase__ : List[Any] = self.sp_model.IdToPiece(lowercase_ )
return token
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : List[Any] ):
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Optional[Any] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_ ) + token
lowerCAmelCase__ : Dict = []
else:
current_sub_tokens.append(lowercase_ )
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : str ,lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowercase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ : List[Any] = os.path.join(
lowercase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ ,'''wb''' ) as fi:
lowerCAmelCase__ : Any = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
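The only subtle method above is `convert_tokens_to_string`, which flushes buffered pieces through SentencePiece whenever a special token is hit, so special tokens are never decoded by the model. A sketch of that grouping logic with a toy decoder standing in for the real `spiece.model` (which this snippet does not require):

```python
tokens = ["▁Hello", "▁world", "</s>", "▁again"]
special_tokens = {"</s>"}


def toy_decode(pieces):  # stand-in for sp_model.decode
    return "".join(pieces).replace("▁", " ").strip()


out, current = "", []
for token in tokens:
    if token in special_tokens:  # flush the buffer, append the special token verbatim
        out += toy_decode(current) + token
        current = []
    else:
        current.append(token)
out += toy_decode(current)
print(out)  # Hello world</s>again
```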
| 74
| 0
|
from __future__ import annotations

import copy
import tempfile
import unittest

from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tensorflow_probability,
    require_tf,
    slow,
)

from ..bert.test_modeling_bert import BertModelTester


if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST


class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
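The registration test above boils down to a short pattern that also works outside the test suite. A minimal sketch (class names are illustrative; TensorFlow and `transformers` are assumed):

```python
from transformers import AutoConfig, BertConfig, TFAutoModel, TFBertModel


class MyConfig(BertConfig):
    model_type = "my-new-model"


class TFMyModel(TFBertModel):
    config_class = MyConfig


AutoConfig.register("my-new-model", MyConfig)
TFAutoModel.register(MyConfig, TFMyModel)

config = MyConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=37)
model = TFAutoModel.from_config(config)  # resolves to TFMyModel via the registry
```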
| 276
|
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
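The parametrized cases above pin down how `_distribute_shards` splits `num_shards` into at most `max_num_jobs` contiguous ranges. A minimal reimplementation consistent with those expected outputs (not the library's internal code):

```python
def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    jobs = []
    div, mod = divmod(num_shards, max_num_jobs)
    for rank in range(min(num_shards, max_num_jobs)):
        # The first `mod` jobs get one extra shard each.
        start = div * rank + min(rank, mod)
        end = start + div + (1 if rank < mod else 0)
        jobs.append(range(start, end))
    return jobs


assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]
assert distribute_shards(0, 1) == []
```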
| 276
| 1
|
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 256
|
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
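A quick illustration of the renaming and reshaping rules above on toy inputs (NumPy arrays work here since only `.ndim` and `.T` are used):

```python
import numpy as np

print(rename_key("layers.0.weight"))  # layers_0.weight: dots inside module indices become underscores

flax_params = {("dense", "kernel"): np.zeros((3, 4))}
key, tensor = rename_key_and_reshape_tensor(("dense", "weight"), np.zeros((4, 3)), flax_params)
print(key, tensor.shape)  # ('dense', 'kernel') (3, 4): linear weights are renamed and transposed
```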
| 256
| 1
|
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
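The first fifteen values produced by the function above:

```python
print(fizz_buzz(1, 15))
# 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz
```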
| 182
|
from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array):
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # statistics are computed over the unpadded frames only
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
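The per-feature normalization in `_normalize_one` is easy to check in isolation: statistics come only from the unpadded frames, then the padded tail is reset. A standalone NumPy sketch:

```python
import numpy as np

x = np.random.randn(120, 80).astype(np.float32)  # (frames, mel bins)
input_length = 100  # frames 100..119 are padding

mean = x[:input_length].mean(axis=0)
std = x[:input_length].std(axis=0)
x = (x - mean) / std
x[input_length:] = 0.0  # padding_value

# The unpadded part is now (approximately) zero-mean, unit-variance per mel bin.
print(x[:input_length].mean(axis=0).round(5).max(), x[:input_length].std(axis=0).round(5).max())
```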
| 182
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __lowercase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any]=False ):
UpperCamelCase_ : List[str] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
UpperCamelCase_ : int = 'segformer.encoder.' + key
if key.startswith('backbone' ):
UpperCamelCase_ : List[str] = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase_ : List[Any] = key[key.find('patch_embed' ) + len('patch_embed' )]
UpperCamelCase_ : Optional[Any] = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(lowerCamelCase )-1}" )
if "norm" in key:
UpperCamelCase_ : Optional[Any] = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase_ : Tuple = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
UpperCamelCase_ : str = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(lowerCamelCase )-1}" )
if "layer_norm1" in key:
UpperCamelCase_ : Optional[int] = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
UpperCamelCase_ : Dict = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase_ : Dict = key[key.find('block' ) + len('block' )]
UpperCamelCase_ : Any = key.replace(F"block{idx}" , F"block.{int(lowerCamelCase )-1}" )
if "attn.q" in key:
UpperCamelCase_ : Tuple = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
UpperCamelCase_ : int = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
UpperCamelCase_ : Optional[Any] = key.replace('attn' , 'attention.self' )
if "fc1" in key:
UpperCamelCase_ : Any = key.replace('fc1' , 'dense1' )
if "fc2" in key:
UpperCamelCase_ : Optional[Any] = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
UpperCamelCase_ : Dict = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
UpperCamelCase_ : Union[str, Any] = key.replace('linear_fuse.conv' , 'linear_fuse' )
UpperCamelCase_ : Optional[Any] = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase_ : Dict = key[key.find('linear_c' ) + len('linear_c' )]
UpperCamelCase_ : Tuple = key.replace(F"linear_c{idx}" , F"linear_c.{int(lowerCamelCase )-1}" )
if key.startswith('head' ):
UpperCamelCase_ : str = key.replace('head' , 'classifier' )
UpperCamelCase_ : int = value
return new_state_dict
def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase_ : Any = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" )
UpperCamelCase_ : int = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
UpperCamelCase_ : Any = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase_ : Union[str, Any] = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase_ : List[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase_ : str = kv_bias[
config.hidden_sizes[i] :
]
def __lowercase ( ):
UpperCamelCase_ : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase_ : Tuple = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
@torch.no_grad()
def __lowercase ( lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] ):
UpperCamelCase_ : Union[str, Any] = SegformerConfig()
UpperCamelCase_ : Tuple = False
# set attributes based on model_name
UpperCamelCase_ : Any = 'huggingface/label-files'
if "segformer" in model_name:
UpperCamelCase_ : Optional[int] = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
UpperCamelCase_ : Optional[int] = 150
UpperCamelCase_ : Optional[Any] = 'ade20k-id2label.json'
UpperCamelCase_ : Optional[Any] = (1, 150, 128, 128)
elif "city" in model_name:
UpperCamelCase_ : List[Any] = 19
UpperCamelCase_ : Dict = 'cityscapes-id2label.json'
UpperCamelCase_ : List[str] = (1, 19, 128, 128)
else:
raise ValueError(F"Model {model_name} not supported" )
elif "mit" in model_name:
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : str = model_name[4:6]
UpperCamelCase_ : str = 1000
UpperCamelCase_ : Any = 'imagenet-1k-id2label.json'
UpperCamelCase_ : Optional[Any] = (1, 1000)
else:
raise ValueError(F"Model {model_name} not supported" )
# set config attributes
UpperCamelCase_ : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) , 'r' ) )
UpperCamelCase_ : Dict = {int(lowerCamelCase ): v for k, v in idalabel.items()}
UpperCamelCase_ : List[str] = idalabel
UpperCamelCase_ : str = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
UpperCamelCase_ : Union[str, Any] = [64, 128, 320, 512]
UpperCamelCase_ : Optional[Any] = 256
elif size == "b2":
UpperCamelCase_ : Any = [64, 128, 320, 512]
UpperCamelCase_ : int = 768
UpperCamelCase_ : Any = [3, 4, 6, 3]
elif size == "b3":
UpperCamelCase_ : Tuple = [64, 128, 320, 512]
UpperCamelCase_ : Union[str, Any] = 768
UpperCamelCase_ : List[str] = [3, 4, 18, 3]
elif size == "b4":
UpperCamelCase_ : Optional[Any] = [64, 128, 320, 512]
UpperCamelCase_ : List[Any] = 768
UpperCamelCase_ : str = [3, 8, 27, 3]
elif size == "b5":
UpperCamelCase_ : Optional[Any] = [64, 128, 320, 512]
UpperCamelCase_ : Union[str, Any] = 768
UpperCamelCase_ : List[str] = [3, 6, 40, 3]
else:
raise ValueError(F"Size {size} not supported" )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
if encoder_only:
UpperCamelCase_ : str = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
else:
UpperCamelCase_ : Tuple = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
UpperCamelCase_ : Union[str, Any] = rename_keys(lowerCamelCase , encoder_only=lowerCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : Any = SegformerForImageClassification(lowerCamelCase )
else:
UpperCamelCase_ : Optional[Any] = SegformerForSemanticSegmentation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
UpperCamelCase_ : Optional[Any] = model(lowerCamelCase )
UpperCamelCase_ : Dict = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
                [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
                [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
            ]
        )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
                [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
                [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
            ]
        )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
                [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
                [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
            ]
        )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
                [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
                [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
            ]
        )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
                [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
                [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
            ]
        )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ]
        )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
            [
                [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
                [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
                [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
            ]
        )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
                [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
                [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
            ]
        )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
                [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
                [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
            ]
        )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
                [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
                [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
            ]
        )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
                [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
                [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
            ]
        )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
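    # Example invocation (hypothetical script name and local paths):
    #   python convert_segformer_original_to_pytorch.py \
    #       --model_name segformer.b0.512x512.ade.160k \
    #       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
    #       --pytorch_dump_folder_path ./segformer-b0-finetuned-ade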
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
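# Illustrative usage (not in the original test file): floats_list((2, 3)) returns
# a nested list with 2 rows of 3 floats drawn from [0, scale).
def _demo_floats_list():
    values = floats_list((2, 3), scale=2.0)
    assert len(values) == 2 and len(values[0]) == 3
    assert all(0.0 <= v < 2.0 for row in values for v in row)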
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors='np').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='np').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors='np', return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='max_length', max_length=4, truncation=True, return_tensors='np', return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='longest', max_length=4, truncation=True, return_tensors='np', return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='longest', max_length=16, truncation=True, return_tensors='np', return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x['array'] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors='pt').input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
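    # Illustration (inferred from the parsing above; the cell values are made up):
    # a `table_text` string like "name#age\nalice#30\nbob#25\n" becomes a pandas
    # DataFrame with columns ["name", "age"] and two records.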
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
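# Worked example (hypothetical sizes, derived from the ratios above): downscaling
# a 4x4 source to 2x2 gives ratio_x = ratio_y = 2.0, so destination pixel
# (i=1, j=1) copies source pixel (get_y(1), get_x(1)) = (2, 2).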
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
# convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
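# Quick sanity check (illustrative only, not part of the original file): the bit
# encoding round-trips exactly on values that lie on the 8-bit grid.
def _demo_bits_roundtrip():
    x = torch.rand(1, 3, 8, 8)
    x_quantized = (x * 255).int().float() / 255.0  # snap to the representable grid
    assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), x_quantized, atol=1e-6)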
# modified scheduler step, where the model output is always re-derived from the clipped x_0 in Glide
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # monkey-patch the scheduler with the bit-aware step defined above
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            model_output = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(model_output, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
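# Hypothetical usage sketch (the UNet/scheduler construction is assumed; no
# named pretrained checkpoint exists in this file):
#   unet = UNet2DConditionModel(...)  # a model trained on bit-encoded images
#   pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=50).images[0]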
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
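# Illustration (derived from the function above): param.args == ("zero2", "base")
# yields the sub-test name "<test_name>_zero2_base".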
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr does not save results we could inspect, so nothing to check here yet
        pass
    def run_and_check(
        self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(
        self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
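# Illustration (derived from get_launcher above): on a machine with at least
# two GPUs, get_launcher(distributed=True) returns
# ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"].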
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
import re
def indian_phone_validator(phone: str) -> bool:
    # optional +91 / 91 / 0 prefix, then a 10-digit number starting with 7, 8 or 9
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
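    # A few extra illustrative checks (added, not in the original file) of what
    # the pattern accepts: a bare 10-digit number starting with 7/8/9, optionally
    # prefixed with +91 (with an optional "-" or space), 91, or 0.
    assert indian_phone_validator("9876543210")
    assert indian_phone_validator("+91-9512346789")
    assert not indian_phone_validator("123456789")  # nine digits, starts with 1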
"""simple docstring"""
def lowercase ( a__ : int , a__ : int ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def lowercase ( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
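    # Equivalence note (added for illustration): for inputs restricted to 0/1,
    # the tuple-count trick above agrees with the bitwise OR.
    for a in (0, 1):
        for b in (0, 1):
            assert or_gate(a, b) == (a | b)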
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
            testargs.append("--fp16")
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
            testargs.append("--fp16")
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize the prompts and yield each one `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Stops generation once every sequence in the batch contains an end-of-function string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_len = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval
# to work nicely with multiprocessing.
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
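# Example invocation (script name and paths are placeholders):
#   python this_script.py /path/to/fairseq/model.pt /path/to/output_dir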
def solution(limit: int = 1_000_000) -> int:
    """
    Count the values of n below `limit` for which x**2 - y**2 - z**2 == n has exactly
    ten solutions in positive integers x, y, z in arithmetic progression.
    Writing y = a and the common difference as d (x = a + d, z = a - d) gives
    n = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a), so 4*d = a + n / a.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # z = a - d > 0 requires a > d; n = a * (4d - a) > 0 requires a < 4d
                    frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in uint16 only if the vocabulary is smaller than 2**16
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
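# Example invocation (script name and paths are placeholders, mirroring the
# argparse defaults above):
#   python this_script.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text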
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm: returns True if `pattern` occurs in `text`.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
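# Worked example (added for illustration): for pattern = "AAAB" the loop above
# produces failure = [0, 1, 2, 0]; each entry is the length of the longest
# proper prefix of the pattern that is also a suffix ending at that index.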
if __name__ == "__main__":
# Test 1)
__A = '''abc1abc12'''
__A = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__A = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__A = '''ABABX'''
__A = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
__A = '''AAAB'''
__A = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
__A = '''abcdabcy'''
__A = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
__A = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
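# For context (a usage note, not part of the original module): this formatter is
# what backs `datasets.Dataset.with_format("jax")` / `set_format("jax")`, which
# route rows, columns, and batches through format_row/format_column/format_batch.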
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput


if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Union[str, Any] ) -> Union[str, Any]:
_A = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_A = dict(zip(__A , range(len(__A ) ) ) )
_A = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_A = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_A = tempfile.mkdtemp()
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , __A )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
# load decoder from hub
_A = '''hf-internal-testing/ngram-beam-search-decoder'''
def __A ( self: Tuple , **__A: str ) -> str:
_A = self.add_kwargs_tokens_map.copy()
kwargs.update(__A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: Any , **__A: List[Any] ) -> Union[str, Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__A )
def __A ( self: List[Any] , **__A: Union[str, Any] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__A )
def __A ( self: List[str] ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __A ( self: List[str] ) -> Optional[Any]:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __A )
def __A ( self: Optional[int] ) -> Union[str, Any]:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __A ( self: str ) -> Any:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __A ( self: List[str] ) -> str:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = floats_list((3, 10_00) )
_A = feature_extractor(__A , return_tensors='''np''' )
_A = processor(__A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self: Union[str, Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = '''This is a test string'''
_A = processor(text=__A )
_A = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self: List[str] , __A: Optional[int]=(2, 10, 16) , __A: Optional[int]=77 ) -> List[Any]:
np.random.seed(__A )
return np.random.rand(*__A )
def __A ( self: List[Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(__A )
_A = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __A ( self: str , __A: Any ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
_A = processor.batch_decode(__A , __A )
_A = list(__A )
with get_context('''fork''' ).Pool() as p:
_A = decoder.decode_beams_batch(__A , __A )
_A ,_A ,_A = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__A , decoded_processor.logit_score )
self.assertListEqual(__A , decoded_processor.lm_score )
def __A ( self: Optional[Any] ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
__A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = decoded_processor_out.text
_A = list(__A )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __A )
self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1e-3 ) )
self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __A , atol=1e-3 ) )
def __A ( self: Optional[int] ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
__A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
_A = decoded_processor_out.text
_A = list(__A )
decoder.reset_params(
alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __A )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __A )
def __A ( self: int ) -> Optional[Any]:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A , __A )
def __A ( self: Tuple ) -> Any:
_A = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_A = WavaVecaProcessorWithLM.from_pretrained(__A )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__A , __A )
def __A ( self: List[str] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(__A , return_tensors='''np''' )
_A = processor_auto(__A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(__A )
_A = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __A ( self: Optional[int] ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __A ( __A: int , __A: List[str] ) -> Union[str, Any]:
_A = [d[key] for d in offsets]
return retrieved_list
def __A ( self: Optional[Any] ) -> int:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()[0]
_A = processor.decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __A ( self: Optional[Any] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()
_A = processor.batch_decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __A ( self: Optional[Any] ) -> Optional[Any]:
import torch
_A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__A )
_A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_A = iter(__A )
_A = next(__A )
_A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_A = model(__A ).logits.cpu().numpy()
_A = processor.decode(logits[0] , output_word_offsets=__A )
_A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_A = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_A = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , __A )
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , output.text )
# output times
_A = torch.tensor(self.get_from_offsets(__A , '''start_time''' ) )
_A = torch.tensor(self.get_from_offsets(__A , '''end_time''' ) )
# fmt: off
_A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
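# A minimal sketch (assuming `model`, `processor`, and `output` are set up as in the slow
# test above) of turning CTC word offsets into seconds: offsets count logit frames, and
# each frame spans inputs_to_logits_ratio / sampling_rate seconds.
#
#   time_per_frame = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
#   word_times = [
#       (d["word"], d["start_offset"] * time_per_frame, d["end_offset"] * time_per_frame)
#       for d in output["word_offsets"]
#   ]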
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """Run the TensorFlow benchmark, translating deprecated `--no_*` flags into a helpful error."""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
        benchmark = TensorFlowBenchmark(args=benchmark_args)
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e).split(' ')[:-1])
        full_error_msg = ''
        depreciated_args = eval(str(e).split(' ')[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
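# Worked example of the flag translation in the except-branch above: for a deprecated
# flag such as "--no_cuda" (hypothetical), arg[2:] == "no_cuda" is the lookup key and
# arg[5:] == "cuda", so the raised error suggests the hyphenated form "--no-cuda".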
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
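# np.maximum broadcasts element-wise, so the same relu works on any array shape, e.g.:
# print(np.array(relu([[-2.0, 2.0], [0.5, -0.5]])))  # --> [[0. 2.] [0.5 0.]]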
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """Output class for Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax Stable Diffusion pipelines."""

        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
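# A minimal sketch of the guard pattern repeated throughout this module, with a
# hypothetical pipeline module: the availability probe raises
# OptionalDependencyNotAvailable, the except branch pulls in dummy objects that raise a
# helpful ImportError on first use, and the else branch imports the real implementation.
#
# try:
#     if not is_torch_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
# else:
#     from .pipeline_example import ExamplePipeline  # hypothetical module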
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging; must be set before TF is imported
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1024,
    'moussaKam/barthez': 1024,
    'moussaKam/barthez-orangesum-title': 1024,
}

SPIECE_UNDERLINE = '▁'
class SCREAMING_SNAKE_CASE__ ( _lowercase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self,__lowerCamelCase,__lowerCamelCase="<s>",__lowerCamelCase="</s>",__lowerCamelCase="</s>",__lowerCamelCase="<s>",__lowerCamelCase="<unk>",__lowerCamelCase="<pad>",__lowerCamelCase="<mask>",__lowerCamelCase = None,**__lowerCamelCase,):
A__ = AddedToken(__lowerCAmelCase,lstrip=__lowerCAmelCase,rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase,__lowerCAmelCase ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase,eos_token=__lowerCAmelCase,unk_token=__lowerCAmelCase,sep_token=__lowerCAmelCase,cls_token=__lowerCAmelCase,pad_token=__lowerCAmelCase,mask_token=__lowerCAmelCase,sp_model_kwargs=self.sp_model_kwargs,**__lowerCAmelCase,)
A__ = vocab_file
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
A__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
A__ = len(self.sp_model ) - 1
A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ = [self.cls_token_id]
A__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase,token_ids_a=__lowerCAmelCase,already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase ( self ):
return len(self.sp_model )
def UpperCamelCase ( self ):
A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self,__lowerCamelCase ):
return self.sp_model.encode(__lowerCAmelCase,out_type=__lowerCAmelCase )
def UpperCamelCase ( self,__lowerCamelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A__ = self.sp_model.PieceToId(__lowerCAmelCase )
return spm_id if spm_id else self.unk_token_id
def UpperCamelCase ( self,__lowerCamelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__lowerCAmelCase )
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = []
A__ = ''''''
A__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
A__ = True
A__ = []
else:
current_sub_tokens.append(__lowerCAmelCase )
A__ = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def __getstate__( self ):
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self,__lowerCamelCase ):
A__ = d
# for backward compatibility
if not hasattr(self,'''sp_model_kwargs''' ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ = os.path.join(
__lowerCAmelCase,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase,'''wb''' ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
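# A minimal usage sketch of the pair layout built by the methods above, i.e.
# `<s> A </s></s> B </s>` (this class corresponds to transformers' BarthezTokenizer;
# the checkpoint download is assumed):
#
#   tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   tok.build_inputs_with_special_tokens([10, 11], [12])
#   # -> [tok.cls_token_id, 10, 11, tok.sep_token_id, tok.sep_token_id, 12, tok.sep_token_id]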
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose rows of raw data into one list of floats per column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each column; weight 0 inverts the score (lower raw value is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one final score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row by weighted percentual proximity and append the score to the row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
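# A minimal usage sketch (hypothetical data): three rows of [price, comfort] with
# weight 0 (lower is better) for price and weight 1 (higher is better) for comfort;
# each row gains a final score column.
#
#   procentual_proximity([[20, 60], [25, 90], [30, 75]], [0, 1])
#   # -> [[20, 60, 1.0], [25, 90, 1.5], [30, 75, 0.5]]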
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of ``nums``.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
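# Sanity check for the example above: the shortest 1 -> 4 route is 1-3-4 (5 + 6 = 11) and
# the shortest 0 -> 3 route is 0-2-3 (9 + 7 = 16); floyd_warshall() costs O(n^3) time.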
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowercase__ :
lowercase__ = XGLMConfig
lowercase__ = {}
lowercase__ = """gelu"""
def __init__( self : List[str] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[Any]=14 ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Union[str, Any]=99 ,lowerCamelCase__ : List[Any]=32 ,lowerCamelCase__ : int=2 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : str=37 ,lowerCamelCase__ : List[Any]="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=512 ,lowerCamelCase__ : List[str]=0.0_2 ,):
'''simple docstring'''
_UpperCamelCase : str = parent
_UpperCamelCase : Optional[int] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : Dict = use_input_mask
_UpperCamelCase : str = use_labels
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Tuple = d_model
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[Any] = ffn_dim
_UpperCamelCase : Optional[Any] = activation_function
_UpperCamelCase : List[Any] = activation_dropout
_UpperCamelCase : Union[str, Any] = attention_dropout
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Optional[int] = 2
_UpperCamelCase : int = 1
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : List[Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) ,clip_value_min=0 ,clip_value_max=3 )
_UpperCamelCase : List[Any] = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Union[str, Any] = self.get_config()
_UpperCamelCase : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=lowerCamelCase__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=lowerCamelCase__ ,)
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) : int = config_and_inputs
_UpperCamelCase : str = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class lowercase__ ( lowercase , lowercase , unittest.TestCase ):
lowercase__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowercase__ = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowercase__ = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : List[str] = TFXGLMModelTester(self )
_UpperCamelCase : str = ConfigTester(self ,config_class=lowerCamelCase__ ,n_embd=37 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : int = TFXGLMModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Optional[Any]=True ):
'''simple docstring'''
_UpperCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_UpperCamelCase : Optional[int] = tf.convert_to_tensor([[2, 268, 9865]] ,dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_UpperCamelCase : Union[str, Any] = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
_UpperCamelCase : Tuple = model.generate(lowerCamelCase__ ,do_sample=lowerCamelCase__ ,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() ,lowerCamelCase__ )
@slow
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_UpperCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
_UpperCamelCase : List[str] = tokenizer('Today is a nice day and' ,return_tensors='tf' )
_UpperCamelCase : Any = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
_UpperCamelCase : Optional[int] = model.generate(lowerCamelCase__ ,do_sample=lowerCamelCase__ ,seed=[7, 0] )
_UpperCamelCase : Any = tokenizer.decode(output_ids[0] ,skip_special_tokens=lowerCamelCase__ )
_UpperCamelCase : List[Any] = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_UpperCamelCase : List[str] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_UpperCamelCase : Optional[Any] = 'left'
# use different length sentences to test batching
_UpperCamelCase : List[str] = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
_UpperCamelCase : Dict = tokenizer(lowerCamelCase__ ,return_tensors='tf' ,padding=lowerCamelCase__ )
_UpperCamelCase : List[Any] = inputs['input_ids']
_UpperCamelCase : int = model.generate(input_ids=lowerCamelCase__ ,attention_mask=inputs['attention_mask'] ,max_new_tokens=12 )
_UpperCamelCase : int = tokenizer(sentences[0] ,return_tensors='tf' ).input_ids
_UpperCamelCase : List[str] = model.generate(input_ids=lowerCamelCase__ ,max_new_tokens=12 )
_UpperCamelCase : List[str] = tokenizer(sentences[1] ,return_tensors='tf' ).input_ids
_UpperCamelCase : Union[str, Any] = model.generate(input_ids=lowerCamelCase__ ,max_new_tokens=12 )
_UpperCamelCase : List[str] = tokenizer.batch_decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ )
_UpperCamelCase : Optional[int] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=lowerCamelCase__ )
_UpperCamelCase : List[Any] = tokenizer.decode(output_padded[0] ,skip_special_tokens=lowerCamelCase__ )
_UpperCamelCase : Any = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,[non_padded_sentence, padded_sentence] )
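# Note on the batching test above: decoder-only generation requires left padding
# (tokenizer.padding_side = "left") so the prompt's last token sits immediately before
# the generated continuation; right padding would insert pad tokens between them.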
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    args = Namespace(**checkpoint['''cfg''']['''model'''])
    state_dict = checkpoint['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    state_dict = {key.replace('''decoder''', '''model'''): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''gelu''', scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing_keys = model.load_state_dict(state_dict, strict=False)
    print(missing_keys)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A = parser.parse_args()
_A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
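# Example invocation (script and path names are hypothetical):
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted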
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date string in mm-dd-yyyy or mm/dd/yyyy format.

    >>> zeller('01-31-2010')
    'Your date 01-31-2010, is a Sunday!'
    """
    days = {
        '''0''': '''Sunday''',
        '''1''': '''Monday''',
        '''2''': '''Tuesday''',
        '''3''': '''Wednesday''',
        '''4''': '''Thursday''',
        '''5''': '''Friday''',
        '''6''': '''Saturday''',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('''Must be 10 characters long''')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('''Month must be between 1 - 12''')
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('''Date must be between 1 - 31''')
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            '''Year out of range. There has to be some sort of limit...right?''')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('''The date was evaluated incorrectly. Contact developer.''')
    # Response
    response = f'''Your date {date_input}, is a {days[str(f)]}!'''
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
lowerCamelCase_ : int = parser.parse_args()
zeller(args.date_input)
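# Example: zeller("01-31-2010") -> "Your date 01-31-2010, is a Sunday!"
# (31 Jan 2010 was indeed a Sunday; the datetime cross-check above guards the congruence.)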
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = IFInpaintingSuperResolutionPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self , __A , __A=0 ) -> Optional[int]:
if str(__A ).startswith('''mps''' ):
a =torch.manual_seed(__A )
else:
a =torch.Generator(device=__A ).manual_seed(__A )
a =floats_tensor((1, 3, 16, 16) , rng=random.Random(__A ) ).to(__A )
a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
a ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self ) -> int:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
"""simple docstring"""
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
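# Both variants compute gcd via the identity gcd(a, b) = gcd(b, a mod b);
# e.g. gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> 6.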
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
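# The module swap above is what makes the package lazy: attribute access resolves through
# the _LazyModule instance, which imports the matching submodule only on first use.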
def mf_knapsack(i, wt, val, j):
    """Memory-function (top-down) 0/1 knapsack using the global table ``f``."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'''But got {num_items} weights and {len(val)} values'''
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                F'''type {type(wt[i])} at index {i}'''
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
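# Example invocation (paths are hypothetical; the flags match the argparse setup above):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bert_model.ckpt \
#       --bert_config_file bert_config.json \
#       --pytorch_dump_path pytorch_model.bin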
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = """src/transformers"""

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
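# For instance (an illustrative snippet, not taken from a real modeling file),
# the multi-line regex above catches usages that the plain substring checks miss:
#
#     hidden_size = getattr(
#         self.config, "hidden_size"
#     )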
def check_config_attributes_being_used(config_class):
    """Check whether all attributes of `config_class` are used in the corresponding modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check whether all attributes of all configuration classes are used in the corresponding modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """
    Try every Caesar shift, score each candidate plaintext with a chi-squared
    statistic against English letter frequencies, and return the shift with the
    lowest statistic together with its decryption.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
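# A minimal usage sketch: "khoor zruog" is "hello world" shifted by 3; the exact
# chi-squared value (and, in adversarial cases, the winning shift) depends on the
# frequency table used.
if __name__ == "__main__":
    shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
    print(f"shift={shift}, chi_squared={chi_squared_value:.3f}, decoded={decoded!r}")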
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
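# A minimal usage sketch (the `Circle` class here is hypothetical):
#
#     class Circle:
#         def __init__(self, radius):
#             self.radius = radius
#
#         @cached_property
#         def area(self):
#             return 3.14159 * self.radius**2  # computed on first access, then cached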
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Test whether `x` is a torch, TensorFlow, Jax or NumPy tensor/array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
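# A minimal usage sketch (the `MyOutput` dataclass is hypothetical):
#
#     from dataclasses import dataclass
#     from typing import Optional
#
#     @dataclass
#     class MyOutput(ModelOutput):
#         loss: Optional[float] = None
#         logits: Optional[list] = None
#
#     out = MyOutput(logits=[1.0, 2.0])
#     out.logits        # attribute access
#     out["logits"]     # dict-style access
#     out.to_tuple()    # ([1.0, 2.0],) -- `None` fields are dropped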
class ExplicitEnum(str, Enum):
    """
    Enum with more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
    an IDE.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
    tab-completion in an IDE.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check if a given model can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
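# For example (illustrative):
#
#     >>> flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#     {'a': 1, 'b.c': 2, 'b.d.e': 3}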
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
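# The helpers above dispatch on the input type, so the same call works for NumPy
# arrays as well as torch/TF/Jax tensors. Illustrative NumPy values:
#
#     >>> x = np.zeros((2, 3))
#     >>> transpose(x).shape
#     (3, 2)
#     >>> expand_dims(x, 0).shape
#     (1, 2, 3)
#     >>> tensor_size(x)
#     6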
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for simultaneous multi-keyword string matching."""

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        Returns a dict with keywords and the list of their occurrences.

        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(max_digits: int = 2) -> int:
    """Return the denominator, in lowest terms, of the product of the digit-cancelling fractions."""
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
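# The four classic digit-cancelling fractions are 16/64 = 1/4, 19/95 = 1/5,
# 26/65 = 2/5 and 49/98 = 4/8; their product is 1/100, so solution() returns
# its denominator in lowest terms, 100.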
from math import sqrt
def is_prime(number: int) -> bool:
    """Returns True if `number` is prime, False otherwise."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status


def sieve_er(n: int) -> list[int]:
    """Sieve of Eratosthenes: returns all primes from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n: int) -> list[int]:
    """Returns all primes between 2 and n, found by trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number: int) -> list[int]:
    """Returns the prime factorization of `number` as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    """Returns the greatest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Returns the smallest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number: int) -> bool:
    """Returns True if `number` is even."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Returns True if `number` is odd."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list[int]:
    """Goldbach's conjecture: returns two primes whose sum equals the even `number`."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable, for breaking up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes, and their sum must equal 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: greatest common divisor of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple ('kleinstes gemeinsames Vielfaches') of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans
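# For example (illustrative): gcd(24, 36) == 12 and kg_v(24, 36) == 72; for
# positive integers, gcd(a, b) * kg_v(a, b) == a * b.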
def get_prime(n: int) -> int:
    """Returns the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    """Returns all primes strictly between the primes `p_number_1` and `p_number_2`."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2' !
    return ans


def get_divisors(n: int) -> list[int]:
    """Returns all divisors of n (1 and n inclusive)."""
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Returns True if `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    """Reduces a fraction to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Returns n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Returns the n-th term of the Fibonacci sequence."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
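# A few illustrative values for the helpers above:
#
#     >>> is_prime(97)
#     True
#     >>> prime_factorization(360)
#     [2, 2, 2, 3, 3, 5]
#     >>> goldbach(28)
#     [5, 23]
#     >>> simplify_fraction(6, 8)
#     (3, 4)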
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)
class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)
class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)
class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list) -> Node:
    """Creates a Linked List from the elements of the given list and returns its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given Linked List in reverse order, recursively."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
@require_torch_gpu
    def test_fp16_casting( self ) -> Any:
        pipe = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
        # convert model to fp16
        pipe.model.half()
        outputs = pipe("Paris is the [MASK] of France." )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
        # for postprocessing.
        self.assertIsInstance(outputs , list )
@slow
@require_torch
    def test_large_model_pt( self ) -> Optional[int]:
        unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
        self.run_large_test(unmasker )
@slow
@require_tf
    def test_large_model_tf( self ) -> List[str]:
        unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
        self.run_large_test(unmasker )
    def run_large_test( self , unmasker ) -> Any:
        outputs = unmasker("My name is <mask>" )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1_573, "token_str": " Chris"},
            ] , )
        outputs = unmasker("The largest city in France is <mask>" )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2_201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12_790,
                    "token_str": " Lyon",
                },
            ] , )
        outputs = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13_606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2_941, "token_str": " Te"},
            ] , )
@require_torch
    def test_model_no_pad_pt( self ) -> Dict:
        unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
@require_tf
    def test_model_no_pad_tf( self ) -> Optional[int]:
        unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
    def get_test_pipeline( self , model , tokenizer , processor ) -> Optional[Any]:
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test( self , fill_masker , examples ) -> str:
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}" , )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
            ] , )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"] )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
            ] , )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
        self.assertEqual(
            outputs , [
                [
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                ],
                [
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                ],
            ] , )
        with self.assertRaises(ValueError ):
            fill_masker([None] )
        # No mask_token is not supported
        with self.assertRaises(ValueError ):
            fill_masker("This is" )
        self.run_test_top_k(model , tokenizer )
        self.run_test_targets(model , tokenizer )
        self.run_test_top_k_targets(model , tokenizer )
        self.fill_mask_with_duplicate_targets_and_top_k(model , tokenizer )
        self.fill_mask_with_multiple_masks(model , tokenizer )
    def run_test_targets( self , model , tokenizer ) -> Optional[int]:
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys() )[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
        outputs = fill_masker(f"This is a {tokenizer.mask_token}" )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(processed_targets ) )
        # Call argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(f"This is a {tokenizer.mask_token}" , targets=targets )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(processed_targets ) )
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}" , targets=targets )
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens ) == set(targets ):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}" , targets=tokens )
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores ) , nested_simplify(target_scores ) )
        # Raises with invalid
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError ):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
    def run_test_top_k( self , model , tokenizer ) -> Any:
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
        outputs = fill_masker(f"This is a {tokenizer.mask_token}" )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
            ] , )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs_2 = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
        self.assertEqual(
            outputs_2 , [
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
            ] , )
        self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs_2 ) )
    def run_test_top_k_targets( self , model , tokenizer ) -> Optional[int]:
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys() )[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=targets )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets_2 = [el["token_str"] for el in sorted(outputs , key=lambda x: x["score"] , reverse=True )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets_2 ).issubset(targets ):
            outputs_2 = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=targets_2 )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs_2 ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ) -> List[Any]:
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys() )[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}" , targets=targets , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ) -> Union[str, Any]:
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                ],
                [
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                ],
                [
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                    {"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
                ],
            ] , )
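# For reference, the `targets` / `top_k` arguments exercised throughout these
# tests mirror the public fill-mask API, e.g. with the large model used above:
#     unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
#     unmasker(f"My name is {unmasker.tokenizer.mask_token}")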
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
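# `check_attribute_being_used` below first looks for literal `config.xxx` /
# `getattr(config, "xxx", ...)` usages; a hypothetical multi-line usage that the
# plain `in` checks would miss, and that the regex is there to catch:
#     image_size = getattr(
#         self.config, "image_size"
#     )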
def check_attribute_being_used( config_class , attributes , default_value , source_strings )-> Dict:
    '''Check whether any variant name in `attributes` is used in the given modeling source strings.'''
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                F'''config.{attribute}''' in modeling_source
                or F'''getattr(config, "{attribute}"''' in modeling_source
                or F'''getattr(self.config, "{attribute}"''' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id" ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used( config_class )-> Optional[Any]:
    '''Return the sorted list of `__init__` arguments of `config_class` not used in the modeling files.'''
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("modeling_" )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes( )-> Optional[Any]:
    '''Check that the arguments in `__init__` of all configuration classes are used in the modeling files.'''
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x: inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += F'''{name}: {attributes}\n'''
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T] ):
    def __init__( self , directed : bool = True ):
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex : T , destination_vertex : T ):
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
    def __repr__( self ):
return pformat(self.adj_list )
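# A quick sanity check of the class above (hypothetical vertices; `add_edge`
# returns `self`, so calls can be chained):
#
#     graph = GraphAdjacencyList[int](directed=False)
#     graph.add_edge(0, 1).add_edge(1, 2)
#     print(graph)  # -> {0: [1], 1: [0, 2], 2: [1]}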
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
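# Worked check of the formula above, using Example 1 from the docstring:
# references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1] with pos_label=1
# gives TP=2 and FN=1, so recall = TP / (TP + FN) = 2 / 3 = 0.6666666666666666.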
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        seq_length=7 ,
        is_training=True ,
        use_token_type_ids=True ,
        use_labels=True ,
        vocab_size=99 ,
        hidden_size=32 ,
        num_hidden_layers=5 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        num_choices=4 ,
        scope=None ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase ):
    @slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline ):
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self , inputs , **tokenize_kwargs ) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , return_tensors=False ):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
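# A minimal usage sketch (assumes the standard `transformers.pipeline` factory
# dispatches the "feature-extraction" task to the class above):
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("This is a test")  # nested list of shape [1, seq_len, hidden_size]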
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
    def test_text_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text )
    def test_iterator_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text )
    def test_text_streamer_skip_prompt( self ):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_prompt=True )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , new_greedy_text )
    def test_text_streamer_decode_kwargs( self ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2" )
        model = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5) , device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_special_tokens=True )
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text , return_tensors="pt" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def test_iterator_streamer_timeout( self ):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer , timeout=0.001 )
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
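# The consumer pattern the tests above exercise -- `generate()` on a background
# thread, iteration on the main thread -- is the intended way to use
# TextIteratorStreamer; a hypothetical application loop looks like:
#     thread = Thread(target=model.generate, kwargs=generation_kwargs)
#     thread.start()
#     for new_text in streamer:
#         print(new_text, end="", flush=True)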
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor of the given shape, as nested Python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
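# e.g. floats_list((2, 3)) -> two lists of three random floats in [0.0, scale).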
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase ):
    def __init__(
        self ,
        parent ,
        batch_size=7 ,
        min_seq_length=400 ,
        max_seq_length=2000 ,
        feature_size=1 ,
        padding_value=0.0 ,
        sampling_rate=16000 ,
        do_normalize=True ,
        num_mel_bins=80 ,
        hop_length=16 ,
        win_length=64 ,
        win_function="hann_window" ,
        fmin=80 ,
        fmax=7600 ,
        mel_floor=1e-10 ,
        return_attention_mask=True ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
    def prepare_inputs_for_target( self , equal_length=False , numpify=False ):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
    def test_call( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_zero_mean_unit_variance_normalization_np( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , padding=padding , max_length=max_length , return_tensors="np" )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lengths = range(800 , 1400 , 200 )
        speech_inputs = [floats_list((1, x) )[0] for x in lengths]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="max_length" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="longest" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=2000 , padding="longest" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )
    def test_double_precision_pad( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def test_call_target( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs , padding=True , return_tensors="np" ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )
@require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )
@require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 42
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from PIL import Image
        >>> import torch
        >>> from diffusers import DiffusionPipeline
        >>> from diffusers.utils import export_to_gif, load_image

        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        >>> repo = "openai/shap-e-img2img"
        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
        >>> pipe = pipe.to(device)

        >>> guidance_scale = 3.0
        >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
        >>> image = load_image(image_url).convert("RGB")

        >>> images = pipe(
        ...     image,
        ...     guidance_scale=guidance_scale,
        ...     num_inference_steps=64,
        ...     frame_size=256,
        ... ).images

        >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
        ```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
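    # Note (added comment): multiplying by `scheduler.init_noise_sigma` follows the
    # standard diffusers convention of putting freshly drawn Gaussian noise on the
    # scale the scheduler expects at its first, largest timestep.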
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
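    # Note (added comment): classifier-free guidance runs the prior on the
    # [negative, positive] embedding batch built above and later recombines the
    # two halves as
    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
    # which is exactly what `__call__` does after `noise_pred.chunk(2)`.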
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                "`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported, not output_type={output_type}")

        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 201
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers
    that add up to `target`, using the two-pointer technique.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 30)
    []
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
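
# Note (added, not part of the original module): the two-pointer scan assumes
# `nums` is sorted ascending. For unsorted input, a hash-map pass -- sketched
# below purely as an illustrative alternative -- finds a pair in O(n) time and
# O(n) space.
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    """
    >>> two_sum_hashmap([11, 2, 15, 7], 9)
    [1, 3]
    """
    seen: dict[int, int] = {}
    for idx, value in enumerate(nums):
        # if the complement was seen earlier, we have our pair
        if target - value in seen:
            return [seen[target - value], idx]
        seen[value] = idx
    return []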
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 201
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
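    # Note (added comment): this check exercises incremental decoding -- feeding
    # only the 3 new tokens together with the cached `past_key_values` must yield
    # the same logits (up to rtol=1e-3) as re-running the full concatenated
    # sequence without a cache.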
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 351
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
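
# Worked example (added comment): with the demo processes below (P1..P4 with
# burst times 53, 17, 68 and 24, and time slices [17, 25]), P2 completes inside
# the first 17-tick round-robin pass, P4 inside the 25-tick pass, and P1 then P3
# drain through the final FCFS queue, so the finish sequence is
# ['P2', 'P4', 'P1', 'P3'].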
if __name__ == "__main__":
import doctest
_A : Optional[Any] =Process('''P1''', 0, 53)
_A : List[Any] =Process('''P2''', 0, 17)
_A : Any =Process('''P3''', 0, 68)
_A : Tuple =Process('''P4''', 0, 24)
_A : int =3
_A : Tuple =[17, 25]
_A : List[Any] =deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
_A : Dict =Process('''P1''', 0, 53)
_A : Union[str, Any] =Process('''P2''', 0, 17)
_A : int =Process('''P3''', 0, 68)
_A : Dict =Process('''P4''', 0, 24)
_A : List[str] =3
_A : List[Any] =[17, 25]
_A : Any =deque([Pa, Pa, Pa, Pa])
_A : List[str] =MLFQ(number_of_queues, time_slices, queue, 0)
_A : Union[str, Any] =mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 129
| 0
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
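
# Illustrative usage (added comment, not part of the original script): with a
# generous token budget, two short examples get packed into a single line.
#
#     tok = AutoTokenizer.from_pretrained("t5-small")  # any tokenizer works here
#     src, tgt = pack_examples(tok, ["a b", "c d"], ["x", "y"], max_tokens=1024)
#     # -> src == ["a b c d"], tgt == ["x y"]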
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
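
# Typical invocation (added comment; the script filename is assumed):
#     python pack_dataset.py --tok_name facebook/bart-large-cnn \
#         --max_seq_len 128 --data_dir ./cnn_dm --save_path ./cnn_dm_packed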
if __name__ == "__main__":
packer_cli()
| 150
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
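
# Example (added comment): rename_key("visual.transformer.resblocks.0.attn.out_proj.weight")
# returns "vision_model.encoder.layers.0.self_attn.out_proj.weight" -- the
# "attn.out_proj" rule fires first, then the "visual.transformer.resblocks" rule.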
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
snake_case = orig_state_dict.pop(_UpperCamelCase )
if "attn.in_proj" in key:
snake_case = key.split('.' )
if key.startswith('visual' ):
snake_case = key_split[3]
snake_case = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case = val[
:dim, :
]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[
-dim:, :
]
else:
snake_case = val[
:dim
]
snake_case = val[
dim : dim * 2
]
snake_case = val[
-dim:
]
else:
if "weight" in key:
snake_case = val[
:dim, :
]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[
-dim:, :
]
else:
snake_case = val[:dim]
snake_case = val[
dim : dim * 2
]
snake_case = val[-dim:]
elif key.startswith('mit' ):
snake_case = key_split[2]
snake_case = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[dim : dim * 2, :]
snake_case = val[-dim:, :]
else:
snake_case = val[:dim]
snake_case = val[dim : dim * 2]
snake_case = val[-dim:]
else:
snake_case = key_split[2]
snake_case = config.text_config.hidden_size
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[-dim:, :]
else:
snake_case = val[:dim]
snake_case = val[
dim : dim * 2
]
snake_case = val[-dim:]
else:
snake_case = rename_key(_UpperCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case = val.T
snake_case = val
return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : Tuple=None , _UpperCamelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
snake_case = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
snake_case = model_to_url[model_name]
snake_case = 8
if "16-frames" in model_name:
snake_case = 1_6
elif "shot" in model_name:
snake_case = 3_2
snake_case = get_xclip_config(_UpperCamelCase , _UpperCamelCase )
snake_case = XCLIPModel(_UpperCamelCase )
model.eval()
if "drive" in checkpoint_url:
snake_case = 'pytorch_model.bin'
gdown.cached_download(_UpperCamelCase , _UpperCamelCase , quiet=_UpperCamelCase )
snake_case = torch.load(_UpperCamelCase , map_location='cpu' )['model']
else:
snake_case = torch.hub.load_state_dict_from_url(_UpperCamelCase )['model']
snake_case = convert_state_dict(_UpperCamelCase , _UpperCamelCase )
snake_case = XCLIPModel(_UpperCamelCase )
snake_case ,snake_case = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
snake_case = 3_3_6 if model_name == 'xclip-large-patch14-16-frames' else 2_2_4
snake_case = VideoMAEImageProcessor(size=_UpperCamelCase )
snake_case = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
snake_case = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
snake_case = XCLIPProcessor(image_processor=_UpperCamelCase , tokenizer=_UpperCamelCase )
snake_case = prepare_video(_UpperCamelCase )
snake_case = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=_UpperCamelCase , return_tensors='pt' , padding=_UpperCamelCase )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
snake_case = model(**_UpperCamelCase )
# Verify outputs
snake_case = outputs.logits_per_video
snake_case = logits_per_video.softmax(dim=1 )
print('Probs:' , _UpperCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case = torch.tensor([[0.00_19, 0.99_51, 0.00_30]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
snake_case = torch.tensor([[0.00_83, 0.96_81, 0.02_36]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
snake_case = torch.tensor([[0.00_62, 0.98_64, 0.00_75]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case = torch.tensor([[0.05_55, 0.89_14, 0.05_31]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case = torch.tensor([[0.00_36, 0.99_20, 0.00_45]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case = torch.tensor([[0.00_27, 0.99_04, 0.00_70]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(_UpperCamelCase , organization='nielsr' )
processor.push_to_hub(_UpperCamelCase , organization='nielsr' )
slow_tokenizer.push_to_hub(_UpperCamelCase , organization='nielsr' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 150
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
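
# Shape check (added comment): with size=(512, 512), overlap_pixels=32 and no
# removed borders, the inner 448x448 block of 255s is padded back to 512x512
# with a 32-pixel linear ramp from 255 down to 0 on every edge.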
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_coords, max_coords):
    return (
        clamp(rect[0], min_coords[0], max_coords[0]),
        clamp(rect[1], min_coords[1], max_coords[1]),
        clamp(rect[2], min_coords[0], max_coords[0]),
        clamp(rect[3], min_coords[1], max_coords[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
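
# e.g. next_divisible(130, 64) == 128 -- the largest multiple of d not above n.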
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 96
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
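    # Minimal initialization sketch (added comment; usage assumed from the Flax API):
    #     unet = FlaxUNet2DConditionModel(sample_size=32)
    #     params = unet.init_weights(jax.random.PRNGKey(0))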
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads`"
                " because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards
        # breaking which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )
            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        reversed_only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=reversed_only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype,
                )
            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
def __call__( self : Optional[Any] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple , __snake_case : Dict=None , __snake_case : Any=None , __snake_case : bool = True , __snake_case : bool = False , ):
# 1. time
if not isinstance(__snake_case , jnp.ndarray ):
a : Tuple = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
a : Tuple = timesteps.astype(dtype=jnp.floataa )
a : Optional[int] = jnp.expand_dims(__snake_case , 0 )
a : Optional[Any] = self.time_proj(__snake_case )
a : Optional[int] = self.time_embedding(__snake_case )
# 2. pre-process
a : str = jnp.transpose(__snake_case , (0, 2, 3, 1) )
a : Optional[int] = self.conv_in(__snake_case )
# 3. down
a : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(__snake_case , __snake_case ):
a , a : Any = down_block(__snake_case , __snake_case , __snake_case , deterministic=not train )
else:
a , a : Optional[Any] = down_block(__snake_case , __snake_case , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
a : Tuple = ()
for down_block_res_sample, down_block_additional_residual in zip(
__snake_case , __snake_case ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
a : Tuple = new_down_block_res_samples
# 4. mid
a : int = self.mid_block(__snake_case , __snake_case , __snake_case , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
a : int = down_block_res_samples[-(self.layers_per_block + 1) :]
a : List[Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__snake_case , __snake_case ):
a : Optional[int] = up_block(
__snake_case , temb=__snake_case , encoder_hidden_states=__snake_case , res_hidden_states_tuple=__snake_case , deterministic=not train , )
else:
a : Optional[Any] = up_block(__snake_case , temb=__snake_case , res_hidden_states_tuple=__snake_case , deterministic=not train )
# 6. post-process
a : Any = self.conv_norm_out(__snake_case )
a : List[str] = nn.silu(__snake_case )
a : Optional[int] = self.conv_out(__snake_case )
a : Any = jnp.transpose(__snake_case , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__snake_case )
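

# Editor-added usage sketch. Assumption: the surrounding class is diffusers'
# FlaxUNet2DConditionModel (its definition and the jax/jnp imports sit earlier
# in the file); the constructor arguments and tensor shapes below are
# illustrative only, not taken from this file.
def _flax_unet_demo():
    rng = jax.random.PRNGKey(0)
    unet = FlaxUNet2DConditionModel(sample_size=32, in_channels=4, out_channels=4)
    params = unet.init_weights(rng)
    sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)  # (batch, channels, height, width)
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
    return unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)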
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
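

# Editor-added sanity check for the helpers above; the expected indices follow
# Python's bisect semantics for tied values.
def _bisect_demo() -> None:
    demo = [0, 5, 7, 10, 15]
    assert binary_search(demo, 15) == 4
    assert binary_search(demo, 6) is None
    assert bisect_left(demo, 5) == 1
    assert bisect_right(demo, 5) == 2
    insort_left(demo, 6)
    assert demo == [0, 5, 6, 7, 10, 15]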
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot holds a deque of values; create it lazily on first insert
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations above on the given string."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
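

# Editor-added examples: both implementations agree on these inputs.
def _palindrome_demo() -> None:
    for text, expected in [("Momo", True), ("Mother", False), ("Taco cat", True), ("", True)]:
        assert can_string_be_rearranged_as_palindrome(text) is expected
        assert can_string_be_rearranged_as_palindrome_counter(text) is expected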
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
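
# Editor-added example: a triangle (odd cycle) can never be two-colored,
# so the check correctly reports it as non-bipartite.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False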
import math


def prime_sieve(n: int) -> list:
    """Returns a list of all primes below n, using an odd-only sieve."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """
    Project Euler 234: sum all semidivisible numbers not exceeding the limit,
    where n is semidivisible if exactly one of lps(n) and ups(n) -- the largest
    prime <= sqrt(n) and the smallest prime >= sqrt(n) -- divides n.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
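

# Editor-added, hypothetical cross-check (not part of the original solution):
# brute-force the sum of semidivisible numbers for a small limit, for manual
# comparison against `solution(limit)`.
def _brute_force(limit: int) -> int:
    primes = prime_sieve(2 * int(math.sqrt(limit)) + 10)
    total = 0
    for n in range(4, limit + 1):
        root = math.sqrt(n)
        lps = max(p for p in primes if p <= root)
        ups = min(p for p in primes if p >= root)
        if (n % lps == 0) != (n % ups == 0):
            total += n
    return total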
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
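
# Editor-added note (assumption: these symbols are also re-exported at the
# transformers top level, as is typical for _LazyModule-backed packages).
# A downstream import only materializes the heavy submodules on first access:
#
#   from transformers import ChineseCLIPModel, ChineseCLIPProcessor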
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
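

# Editor-added usage sketch (assumes Pillow is available; the settings mirror a
# typical DPT-hybrid configuration). With keep_aspect_ratio=True a 640x480 input
# is resized to 384x512 rather than forced into 384x384.
def _dpt_image_processor_demo():
    image = PIL.Image.new("RGB", (640, 480))
    processor = DPTImageProcessor(size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32)
    batch = processor(images=image, return_tensors="np")
    return batch["pixel_values"].shape  # expected: (1, 3, 384, 512)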
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
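
# Editor-added usage note (the script filename below is illustrative):
#
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations
#
# --txt2img_unclip defaults to "kakaobrain/karlo-v1-alpha", so only the output
# path is required.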
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    r"""
    Constructs a Speech2Text processor which wraps a feature extractor and a tokenizer into a single processor.
    """
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
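

# Editor-added usage sketch: the processor routes `audio` to the feature
# extractor and `text` to the tokenizer in a single call. The checkpoint name
# is illustrative and the exact set of output keys is an assumption.
def _speech2text_processor_demo():
    import numpy as np

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=audio, sampling_rate=16_000, text="hello world")
    return sorted(inputs.keys())  # expected to include "input_features" and "labels"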
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__snake_case : Optional[int] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__snake_case : List[str] = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
__snake_case : Dict = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=["https://github.com/jhclark/tercom"],
        )

    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive)
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Dict = tempfile.mkdtemp()
_lowerCamelCase : Union[str, Any] = BlipImageProcessor()
_lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
_lowerCamelCase : str = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
_lowerCamelCase : Optional[Any] = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def A_ ( self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def A_ ( self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def A_ ( self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def A_ ( self ):
shutil.rmtree(self.tmpdirname )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCamelCase : int = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self ):
_lowerCamelCase : Dict = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCamelCase : str = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
_lowerCamelCase : Union[str, Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def A_ ( self ):
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
_lowerCamelCase : List[Any] = self.get_qformer_tokenizer()
_lowerCamelCase : List[Any] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Optional[Any] = image_processor(lowercase , return_tensors='np' )
_lowerCamelCase : Any = processor(images=lowercase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A_ ( self ):
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_qformer_tokenizer()
_lowerCamelCase : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
_lowerCamelCase : Dict = 'lower newer'
_lowerCamelCase : Dict = processor(text=lowercase )
_lowerCamelCase : Optional[Any] = tokenizer(lowercase , return_token_type_ids=lowercase )
_lowerCamelCase : Any = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def A_ ( self ):
_lowerCamelCase : str = self.get_image_processor()
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_qformer_tokenizer()
_lowerCamelCase : Union[str, Any] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
_lowerCamelCase : Tuple = 'lower newer'
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def A_ ( self ):
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_qformer_tokenizer()
_lowerCamelCase : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
_lowerCamelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : List[Any] = processor.batch_decode(lowercase )
_lowerCamelCase : str = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : str = self.get_qformer_tokenizer()
_lowerCamelCase : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
_lowerCamelCase : int = 'lower newer'
_lowerCamelCase : str = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """ctrl"""
lowerCamelCase__ = ["""past_key_values"""]
lowerCamelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=246534 , lowercase=256 , lowercase=1280 , lowercase=8192 , lowercase=48 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=1E-6 , lowercase=0.02 , lowercase=True , **lowercase , ):
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Dict = n_positions
_lowerCamelCase : Optional[int] = n_embd
_lowerCamelCase : str = n_layer
_lowerCamelCase : Union[str, Any] = n_head
_lowerCamelCase : Any = dff
_lowerCamelCase : int = resid_pdrop
_lowerCamelCase : Dict = embd_pdrop
_lowerCamelCase : Union[str, Any] = layer_norm_epsilon
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : str = use_cache
super().__init__(**lowercase )
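

# Editor-added example: `attribute_map` lets the generic PretrainedConfig
# attribute names resolve to the CTRL-specific ones.
def _ctrl_config_demo():
    config = CTRLConfig(n_layer=2)
    assert config.num_hidden_layers == config.n_layer == 2
    assert config.hidden_size == config.n_embd == 1280
    return config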
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the smallest
    term for which c > 10^k, when the terms are written as a(i) = b * 10^k + c.
    Returns the new term minus the starting term, and the number of terms jumped.
    """
    # ds_b -> digitsum(b); c -> the low k digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top-k/top-p filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # This test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose call signature accepts an extra "foo" argument
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    # If TRANSFORMERS_VERBOSITY is set to a valid choice, use it; otherwise fall
    # back to `_default_log_level`.
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
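# Usage sketch (illustrative, not part of the module): how client code typically
# drives the verbosity and progress-bar switches defined above.
#
#   from transformers.utils import logging
#
#   logging.set_verbosity_info()            # library root logger -> INFO
#   logger = logging.get_logger(__name__)   # child of the library root logger
#   logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")
#   logging.disable_progress_bars()         # `tqdm` above becomes a no-op wrapper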
| 92
|
"""simple docstring"""
from __future__ import annotations
_snake_case : str = []
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
for i in range(len(UpperCamelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ):
if board[i][j] == 1:
return False
return True
def A__ ( UpperCamelCase , UpperCamelCase ):
if row >= len(UpperCamelCase ):
solution.append(UpperCamelCase )
printboard(UpperCamelCase )
print()
return True
for i in range(len(UpperCamelCase ) ):
if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = 1
solve(UpperCamelCase , row + 1 )
A = 0
return False
def A__ ( UpperCamelCase ):
for i in range(len(UpperCamelCase ) ):
for j in range(len(UpperCamelCase ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
_snake_case : List[str] = 8
_snake_case : List[str] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
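# Note: `solution` stores references to the single mutable `board`, which is reset to
# all zeros as the recursion unwinds, so only `len(solution)` is meaningful afterwards.
# For n = 8 the printed total is 92 distinct placements.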
| 292
| 0
|
"""simple docstring"""
import math
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase=0 ): # a graph with Node 0,1,...,N-1
__a : Tuple = n
__a : Tuple = [
[math.inf for j in range(0 , _UpperCAmelCase )] for i in range(0 , _UpperCAmelCase )
] # adjacency matrix for weight
__a : Union[str, Any] = [
[math.inf for j in range(0 , _UpperCAmelCase )] for i in range(0 , _UpperCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : int = w
def _lowerCamelCase ( self ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__a : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
return self.dp[u][v]
if __name__ == "__main__":
A = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
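# Notes: the triple loop above is the standard O(n^3) Floyd-Warshall relaxation.
# Since dp[i][i] is initialised to math.inf and never set to 0, show_min(i, i)
# reports inf rather than 0; callers interested in self-distances should seed the
# diagonal first, e.g. `for i in range(graph.n): graph.dp[i][i] = 0`.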
| 355
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _lowerCamelCase ( self , _UpperCAmelCase=0 ):
__a : Tuple = floats_tensor((1, 3, 128, 128) , rng=random.Random(_UpperCAmelCase ) )
__a : Any = np.random.RandomState(_UpperCAmelCase )
__a : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Dict = self.get_dummy_inputs()
__a : Any = pipe(**_UpperCAmelCase ).images
__a : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__a : List[Any] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs()
__a : Optional[Any] = pipe(**_UpperCAmelCase ).images
__a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[int] = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# warmup pass to apply optimizations
__a : Any = pipe(**self.get_dummy_inputs() )
__a : List[str] = self.get_dummy_inputs()
__a : Tuple = pipe(**_UpperCAmelCase ).images
__a : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : int = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[Any] = self.get_dummy_inputs()
__a : Any = pipe(**_UpperCAmelCase ).images
__a : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Union[str, Any] = self.get_dummy_inputs()
__a : str = pipe(**_UpperCAmelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[int] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs()
__a : Optional[Any] = pipe(**_UpperCAmelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[Any] = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self ):
__a : Optional[Any] = ort.SessionOptions()
__a : Any = False
return options
def _lowerCamelCase ( self ):
__a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a : Tuple = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Tuple = '''A fantasy landscape, trending on artstation'''
__a : Tuple = np.random.RandomState(0 )
__a : int = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : List[Any] = output.images
__a : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a : Any = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowerCamelCase ( self ):
__a : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a : Tuple = init_image.resize((768, 512) )
__a : str = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__a : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[str] = '''A fantasy landscape, trending on artstation'''
__a : str = np.random.RandomState(0 )
__a : str = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : Dict = output.images
__a : List[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a : Dict = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
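# Editorial note: every fast test above follows the same recipe -- load the tiny ONNX
# pipeline on CPU, optionally swap the scheduler via
# `SchedulerClass.from_config(pipe.scheduler.config)`, run 3 denoising steps, and
# compare a 3x3 corner of the output against a hard-coded slice with a loose 1e-1
# tolerance (ONNX Runtime results are not bit-reproducible across builds).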
| 188
| 0
|
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
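# Editorial note: `text_path` and `tmp_path` are pytest fixtures; judging from the
# assertions in `_check_text_dataset`, the fixture file contains four lines, each of
# which becomes one row of the single "text" column.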
| 22
|
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return log10(n / df) rounded to 3 decimals, optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Return tf * idf rounded to 3 decimals."""
    return round(tf * idf, 3)
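# Worked example (illustrative): for a corpus of 10 documents where the term occurs
# in 2 of them, inverse_document_frequency(2, 10) = round(log10(5), 3) = 0.699, and a
# document containing the term 3 times scores tf_idf(3, 0.699) = round(3 * 0.699, 3)
# = 2.097.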
| 22
| 1
|
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter for logging in multiprocess setups. `log` honours two extra kwargs,
    `main_process_only` and `in_order`, which decide which processes emit a record.
    """

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
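# Usage sketch (illustrative): the adapter honours two extra kwargs that plain
# `logging` calls do not have.
#
#   logger = get_logger(__name__)
#   logger.info("main process only")                            # default behaviour
#   logger.info("all ranks", main_process_only=False)
#   logger.info("rank by rank", main_process_only=False, in_order=True)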
| 99
|
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 1
|
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
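# Usage sketch (illustrative): composing a full config from sub-configs by hand.
#
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = CONFIG_MAPPING["opt"]()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
#   assert config.qformer_config.encoder_hidden_size == vision.hidden_size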
| 122
|
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Compute the expected (height, width) after resizing with a scalar shortest edge.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_2.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
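# Editorial note: the two @slow tests exercise both COCO annotation formats -- plain
# object detection ("annotations") and panoptic segmentation ("segments_info" plus a
# masks_path) -- and pin the resulting targets (boxes, areas, class labels, masks sum)
# to reference tensors.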
| 122
| 1
|
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's `batch_decode`.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's `decode`.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
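# Usage sketch (illustrative): the processor accepts a text prompt or a visual prompt
# together with the query images, but never both at once.
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   enc = processor(text=["a cat"], images=image, return_tensors="pt")
#   # enc contains "input_ids", "attention_mask" and "pixel_values"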
| 358
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
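

# Shape sketch (added; not part of the original test file): what the rollout above feeds
# the model at each step, condensed into one self-contained forward pass.
def _demo_decision_transformer_shapes():  # hypothetical helper, never called by the suite
    import torch
    from transformers import DecisionTransformerConfig, DecisionTransformerModel

    config = DecisionTransformerConfig(state_dim=17, act_dim=6)
    model = DecisionTransformerModel(config).eval()

    batch, seq_len = 1, 5
    states = torch.randn(batch, seq_len, config.state_dim)
    actions = torch.randn(batch, seq_len, config.act_dim)
    rewards = torch.randn(batch, seq_len, 1)
    returns_to_go = torch.randn(batch, seq_len, 1)
    timesteps = torch.arange(seq_len).reshape(1, seq_len)
    attention_mask = torch.ones(batch, seq_len, dtype=torch.long)

    with torch.no_grad():
        out = model(
            states=states,
            actions=actions,
            rewards=rewards,
            returns_to_go=returns_to_go,
            timesteps=timesteps,
            attention_mask=attention_mask,
        )
    # One prediction per input step: action_preds mirrors the shape of actions.
    assert out.action_preds.shape == actions.shape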
| 297
| 0
|
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCamelCase_ = """"""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class _SCREAMING_SNAKE_CASE( tr.AbstractTransform ):
def __init__( self ,SCREAMING_SNAKE_CASE__ = " " ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = sentence_delimiter
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
return list(lowerCAmelCase__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = []
for sent_idx, sentence in enumerate(lowerCAmelCase__ ):
chars.extend(self.process_string(lowerCAmelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCAmelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowerCamelCase_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowerCamelCase_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCamelCase_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
lowerCamelCase_ = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
lowerCamelCase_ = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE( datasets.Metric ):
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/jitsi/jiwer/'''] ,reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] ,)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=False ) -> Dict:
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
lowerCAmelCase__ ,lowerCAmelCase__ ,truth_transform=lowerCAmelCase__ ,hypothesis_transform=lowerCAmelCase__ ,)["wer"]
__SCREAMING_SNAKE_CASE :Union[str, Any] = 0
__SCREAMING_SNAKE_CASE :Any = 0
for prediction, reference in zip(lowerCAmelCase__ ,lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE :List[Any] = jiwer.compute_measures(
lowerCAmelCase__ ,lowerCAmelCase__ ,truth_transform=lowerCAmelCase__ ,hypothesis_transform=lowerCAmelCase__ ,)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
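

# Worked example (added): the CER formula from _DESCRIPTION on a concrete pair.
# Reference "abc" vs prediction "axc": one substitution (b -> x), no deletions or
# insertions, two correct characters, so N = S + D + C = 3 and CER = 1/3.
def _demo_cer_by_hand():  # hypothetical helper illustrating the arithmetic only
    S, D, I, C = 1, 0, 0, 2
    N = S + D + C  # number of characters in the reference
    assert abs((S + D + I) / N - 1 / 3) < 1e-9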
| 191
|
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY

if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
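

# Usage sketch (added; not part of the original test file): the same pipeline outside
# the test harness, using the tiny checkpoint exercised above.
def _demo_text2text_pipeline():  # hypothetical helper, never called by the suite
    from transformers import pipeline

    generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
    # do_sample=False keeps the randomly initialized tiny model deterministic,
    # mirroring the reproducibility note in test_small_model_pt above.
    print(generator("Something there", do_sample=False))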
| 13
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as ResNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
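

# Usage sketch (added; not part of the original test file): single-image inference with
# the first archive checkpoint ("microsoft/resnet-50"), outside the unittest harness.
def _demo_tf_resnet_inference():  # hypothetical helper, never called by the suite
    import tensorflow as tf
    from PIL import Image
    from transformers import AutoImageProcessor, TFResNetForImageClassification

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

    inputs = image_processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits
    predicted_id = int(tf.math.argmax(logits, axis=-1)[0])
    return model.config.id2label[predicted_id]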
| 368
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.")
    parser.add_argument("-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.")
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How many images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
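
# Example invocation (added; the script name and checkpoint id are assumptions -- any
# Stable Diffusion layout with tokenizer/text_encoder/vae/unet subfolders works):
#
#   python text2images.py -m runwayml/stable-diffusion-v1-5 -c "robotic cat with wings" -n 4 -s 42
#
# If <model_dir>/best_model.pt exists, the UNet quantized by neural_compressor is loaded
# via `load(...)`; otherwise the original FP32 UNet is moved to the selected CUDA device.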
| 119
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing models
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
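

# Note (added): `original_image` is what distinguishes this pipeline's batch_params from
# plain text-guided inpainting -- the IF super-resolution stage conditions on the
# full-resolution source image in addition to the low-resolution `image` and `mask_image`.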
| 82
|
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : Tuple ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : List[str] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Dict , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : str ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# NOTE: the original class and method names below were destroyed by identifier
# scrambling in this dump. The standard `DummyObject` pattern from
# `transformers/utils/dummy_pt_objects.py` is restored with placeholder class
# names; `DummyObject` and `requires_backends` are assumed to be imported from
# `..utils` near the (not shown) top of this file.
class TorchPlaceholder1(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder2(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder3(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder4(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder5(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder6(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder7(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchPlaceholder8(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
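

# Usage sketch (hedged addition, not part of the original file): importing one of
# the placeholders above always succeeds; the backend check only fires on use.
#
#     obj = TorchPlaceholder1()                       # raises via requires_backends
#     TorchPlaceholder1.from_pretrained("repo/name")  # same failure mode without torch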
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    """Configuration for MGP-STR scene-text-recognition models."""

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27,
        num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0,
        qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0,
        attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False,
        initializer_range=0.02, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
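

# Usage sketch (hedged addition, not part of the original module): the defaults
# reproduce the `alibaba-damo/mgp-str-base` architecture.
if __name__ == "__main__":
    config = MgpstrConfig()
    assert config.hidden_size == 768 and config.max_token_length == 27
    print(config.to_json_string())  # serialization comes from PretrainedConfig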
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_pythia_410m_deduped(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # readout_type "add" is not supported by the hybrid model and must raise
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
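
        # Hedged post-processing sketch (not in the original test): depth maps are
        # usually min-max rescaled to uint8 for visualization, e.g.
        #     depth = predicted_depth.squeeze().cpu().numpy()
        #     formatted = (255 * (depth - depth.min()) / (depth.max() - depth.min())).astype("uint8")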
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    """A FIFO queue built from two LIFO stacks (amortized O(1) operations)."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Bind the list methods once to keep the transfer loop cheap.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
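
    # usage sketch (hedged addition): FIFO behaviour of the two-stack queue above
    q: QueueByTwoStacks[int] = QueueByTwoStacks([1, 2, 3])
    q.put(4)
    assert q.get() == 1  # the oldest element leaves first
    assert len(q) == 3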
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    # call infix_2_postfix on the reversed Infix, then reverse the Postfix
    return (infix_2_postfix("".join(infix)))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
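
    # worked example (hedged addition): for the input a+b*(c^d-e)
    #   postfix: abcd^e-*+    prefix: +a*b-^cde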
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BARTpho, using a reduced monolingual vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as <s> A </s> for a single sequence, or <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
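
# Usage note (hedged addition): with the lazy module above,
# `from transformers.models.jukebox import JukeboxConfig` only imports the
# configuration module; the torch-backed classes in `modeling_jukebox` are
# resolved on first attribute access and fail with a clear error if torch is absent.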
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` on the range [a, b], pushing pending lazy values down first."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum on the range [a, b], pushing pending lazy values down first."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
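    # hedged addition: range assignment and range-max query both stay O(log n)
    # thanks to the lazy flags, so repeated updates remain cheap:
    segt.update(1, 1, size, 4, 10, -1)
    print(segt.query(1, 1, size, 1, size))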
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase : Any = self._feature_file(_lowercase )
if os.path.exists(_lowercase ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , _lowercase )
UpperCamelCase : Optional[Any] = torch.load(_lowercase )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
UpperCamelCase : Union[str, Any] = self.token_classification_task.read_examples_from_file(args.data_dir , _lowercase )
UpperCamelCase : List[str] = self.token_classification_task.convert_examples_to_features(
_lowercase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , _lowercase )
torch.save(_lowercase , _lowercase )
def __UpperCamelCase( self , A_ , A_ , A_ = False ):
'''simple docstring'''
UpperCamelCase : str = self._feature_file(_lowercase )
logger.info("Loading features from cached file %s" , _lowercase )
UpperCamelCase : str = torch.load(_lowercase )
UpperCamelCase : Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCamelCase : Optional[int] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCamelCase : Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCamelCase : Dict = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(_lowercase , _lowercase , _lowercase , _lowercase ) , batch_size=_lowercase )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
"""Compute validation""" ""
UpperCamelCase : Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase : Optional[int] = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCamelCase : List[Any] = self(**_lowercase )
UpperCamelCase , UpperCamelCase : Optional[Any] = outputs[:2]
UpperCamelCase : List[Any] = logits.detach().cpu().numpy()
UpperCamelCase : Optional[int] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = torch.stack([x["val_loss"] for x in outputs] ).mean()
UpperCamelCase : List[str] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
UpperCamelCase : int = np.argmax(_lowercase , axis=2 )
UpperCamelCase : Optional[int] = np.concatenate([x["target"] for x in outputs] , axis=0 )
UpperCamelCase : str = dict(enumerate(self.labels ) )
UpperCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase : int = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCamelCase : Tuple = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(_lowercase , _lowercase ),
"precision": precision_score(_lowercase , _lowercase ),
"recall": recall_score(_lowercase , _lowercase ),
"f1": fa_score(_lowercase , _lowercase ),
}
UpperCamelCase : List[Any] = dict(results.items() )
UpperCamelCase : Tuple = results
return ret, preds_list, out_label_list
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase : str = self._eval_end(_lowercase )
UpperCamelCase : Tuple = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = self._eval_end(_lowercase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCamelCase : Dict = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(_lowercase , _lowercase )
parser.add_argument(
"--task_type" , default="NER" , type=_lowercase , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=128 , type=_lowercase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=_lowercase , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=_lowercase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__lowerCamelCase : Any = NERTransformer.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase : Any = parser.parse_args()
__lowerCamelCase : Any = NERTransformer(args)
__lowerCamelCase : List[Any] = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
__lowerCamelCase : List[str] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
__lowerCamelCase : Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def A_ ( _lowerCAmelCase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase : str = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(_lowerCAmelCase )
UpperCamelCase : Optional[int] = "".join(bin(_lowerCAmelCase )[2:].zfill(8 ) for byte in data )
UpperCamelCase : str = len(_lowerCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCamelCase : Optional[Any] = b"=" * ((6 - len(_lowerCAmelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowerCAmelCase ) % 6)
else:
UpperCamelCase : List[Any] = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowerCAmelCase ) , 6 ) ).encode()
+ padding
)
def A_ ( _lowerCAmelCase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase : List[Any] = (
"argument should be a bytes-like object or ASCII string, "
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(_lowerCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
UpperCamelCase : Any = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
UpperCamelCase : Union[str, Any] = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCamelCase : List[str] = encoded_data[:-padding]
UpperCamelCase : List[Any] = "".join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCamelCase : List[Any] = "".join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )
UpperCamelCase : Any = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowerCAmelCase ) , 8 )
]
return bytes(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
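    # round-trip sanity check for the encoder/decoder above (hedged addition)
    payload = b"Hello, World!"
    assert base64_decode(base64_encode(payload)) == payload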
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of small random PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def UpperCamelCase_ ( self : Dict ):
__A = self.get_tokenizer()
__A = self.get_image_processor()
__A = VisionTextDualEncoderProcessor(tokenizer=A ,image_processor=A )
processor.save_pretrained(self.tmpdirname )
__A = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,A )
def UpperCamelCase_ ( self : Tuple ):
__A = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
__A = self.get_image_processor(do_normalize=A ,padding_value=1.0 )
__A = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=A ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,A )
def UpperCamelCase_ ( self : int ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = VisionTextDualEncoderProcessor(tokenizer=A ,image_processor=A )
__A = self.prepare_image_inputs()
__A = image_processor(A ,return_tensors="np" )
__A = processor(images=A ,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = VisionTextDualEncoderProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = processor(text=A )
__A = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase_ ( self : Any ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = VisionTextDualEncoderProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = self.prepare_image_inputs()
__A = processor(text=A ,images=A )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(A ):
processor()
def UpperCamelCase_ ( self : int ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = VisionTextDualEncoderProcessor(tokenizer=A ,image_processor=A )
__A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A = processor.batch_decode(A )
__A = tokenizer.batch_decode(A )
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : int ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = VisionTextDualEncoderProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = self.prepare_image_inputs()
__A = processor(text=A ,images=A )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
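# Example invocation (hypothetical output file names; the source file must
# follow the DPR biencoder JSON schema with "question" and "positive_ctxs"
# fields):
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set nq_dev.questions \
#       --gold_data_path nq_dev.gold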
| 119
| 0
|
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Return `x` with each sentence on its own line (useful for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
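# Minimal usage sketch (requires nltk and the "punkt" model downloaded above;
# the example text is hypothetical):
#
#   text = "Pegasus marks newlines with <n>. This helper splits per sentence."
#   print(add_newline_to_end_of_each_sentence(text))
#   # -> one sentence per line, with the "<n>" markers stripped first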
| 359
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by tracing the operations both run on `x`."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a classy_vision RegNet trunk so that `get_trunk_forward_outputs` can run on it."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """Maps a model name to a function that instantiates the original (timm or vissl) model."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    """Maps a model name to the matching Hugging Face RegNet class."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # since we don't use any config files, the vissl seer model doesn't actually have a head,
    # so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
            ''' currently: regnet-x-*, regnet-y-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
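# Example invocation (hypothetical output directory). Note that argparse's
# `type=bool` does not parse "False" as falsy (bool("False") is True), so
# `--push_to_hub` is effectively always truthy unless the default is changed:
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted-regnet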
| 59
| 0
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = F'''Enter the left node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = F'''Enter the right node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: kept so the declared return type always holds
def pre_order(node: TreeNode) -> None:
    """Root -> Left -> Right."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Left -> Root -> Right."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Left -> Right -> Root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')


def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal printing one level per line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # consume everything currently queued => one level
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=',')


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return F'''{left * char} {s} {(left + extra) * char}'''
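# Worked example: for the tree 1 -> (2, 3) with 2 -> (4, 5), the traversals
# above print (trailing commas included):
#   pre_order:   1,2,4,5,3,
#   in_order:    4,2,5,1,3,
#   post_order:  4,5,2,3,1,
#   level_order: 1,2,3,4,5,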
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 213
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected')
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[F'''{new_prefix}.norm1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[F'''{new_prefix}.conv1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[F'''{new_prefix}.norm2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[F'''{new_prefix}.conv2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[F'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[F'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3, dim=0)
    new_checkpoint[F'''{new_prefix}.group_norm.weight'''] = checkpoint[F'''{old_prefix}.norm.weight''']
    new_checkpoint[F'''{new_prefix}.group_norm.bias'''] = checkpoint[F'''{old_prefix}.norm.bias''']
    new_checkpoint[F'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[F'''{new_prefix}.to_out.0.bias'''] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint['time_embed.0.weight']
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint['time_embed.0.bias']
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint['time_embed.2.weight']
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint['time_embed.2.bias']

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint['label_emb.weight']

    new_checkpoint["conv_in.weight"] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint["conv_in.bias"] = checkpoint['input_blocks.0.0.bias']

    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = F'''down_blocks.{i}.attentions.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = F'''down_blocks.{i}.downsamplers.0'''
            old_prefix = F'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config['up_block_types']

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = F'''up_blocks.{i}.attentions.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint['out.0.weight']
    new_checkpoint["conv_norm_out.bias"] = checkpoint['out.0.bias']
    new_checkpoint["conv_out.weight"] = checkpoint['out.2.weight']
    new_checkpoint["conv_out.bias"] = checkpoint['out.2.bias']

    return new_checkpoint
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
__SCREAMING_SNAKE_CASE =parser.parse_args()
__SCREAMING_SNAKE_CASE =strabool(args.class_cond)
__SCREAMING_SNAKE_CASE =os.path.basename(args.unet_path)
print(F"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
__SCREAMING_SNAKE_CASE =IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__SCREAMING_SNAKE_CASE =LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__SCREAMING_SNAKE_CASE =TEST_UNET_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
__SCREAMING_SNAKE_CASE =None
__SCREAMING_SNAKE_CASE =con_pt_to_diffuser(args.unet_path, unet_config)
__SCREAMING_SNAKE_CASE =UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__SCREAMING_SNAKE_CASE =CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__SCREAMING_SNAKE_CASE =CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__SCREAMING_SNAKE_CASE =CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
__SCREAMING_SNAKE_CASE =CMStochasticIterativeScheduler(**scheduler_config)
__SCREAMING_SNAKE_CASE =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
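# Example invocation (hypothetical checkpoint file; the file name must contain
# "imagenet64", "256" plus "bedroom"/"cat", or "test" so the configs above can
# be selected):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True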
| 213
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
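# Minimal usage sketch: the defaults above reproduce the base architecture, and
# any field can be overridden by keyword (hypothetical values shown):
#
#   config = UniSpeechSatConfig(num_hidden_layers=6, hidden_size=384)
#   assert config.model_type == "unispeech-sat"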
| 356
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
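# With this `_LazyModule` indirection, importing the package stays cheap: the
# torch/tf/flax modules listed in `_import_structure` are only imported the
# first time one of their exported names is actually accessed.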
| 73
| 0
|
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id'''))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('''*''').where(f'''part_id = {partition_id}''').drop('''part_id''')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir

        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, '''fs_test''' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, '''a''')
            return [probe_file]

        if self._spark.conf.get('''spark.master''', '''''').startswith('''local'''):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''')
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, '''batch_bytes: long''')
            .agg(pyspark.sql.functions.sum('''batch_bytes''').alias('''sample_bytes'''))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == '''parquet''' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == '''parquet'''

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=['''task_id''', '''num_examples''', '''num_bytes'''],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace('''SSSSS''', f'''{shard_id:05d}''').replace('''TTTTT''', f'''{task_id:05d}'''),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=['''task_id''', '''num_examples''', '''num_bytes'''],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace('''SSSSS''', f'''{shard_id:05d}''').replace('''TTTTT''', f'''{task_id:05d}'''),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=['''task_id''', '''num_examples''', '''num_bytes'''],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, '''task_id: long, num_examples: long, num_bytes: long''')
            .groupBy('''task_id''')
            .agg(
                pyspark.sql.functions.sum('''num_examples''').alias('''total_num_examples'''),
                pyspark.sql.functions.sum('''num_bytes''').alias('''total_num_bytes'''),
                pyspark.sql.functions.count('''num_bytes''').alias('''num_shards'''),
                pyspark.sql.functions.collect_list('''num_examples''').alias('''shard_lengths'''),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowercase ( self : int , _lowerCamelCase : "datasets.SplitGenerator" , _lowerCamelCase : str = "arrow" , _lowerCamelCase : Optional[Union[str, int]] = None , _lowerCamelCase : Optional[int] = None , **_lowerCamelCase : List[Any] , ):
self._validate_cache_dir()
_snake_case = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_lowerCamelCase )
_snake_case = not is_remote_filesystem(self._fs )
_snake_case = os.path.join if is_local else posixpath.join
_snake_case = '''-TTTTT-SSSSS-of-NNNNN'''
_snake_case = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
_snake_case = path_join(self._output_dir , _lowerCamelCase )
_snake_case = 0
_snake_case = 0
_snake_case = 0
_snake_case = []
_snake_case = []
for task_id, content in self._prepare_split_single(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_lowerCamelCase )
_snake_case = total_num_examples
_snake_case = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
_snake_case = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_snake_case = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , ):
rename(
_lowerCamelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , )
_snake_case = []
_snake_case = 0
for i in range(len(_lowerCamelCase ) ):
_snake_case , _snake_case = task_id_and_num_shards[i]
for shard_id in range(_lowerCamelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_lowerCamelCase , len(_lowerCamelCase ) ).map(lambda _lowerCamelCase : _rename_shard(*_lowerCamelCase ) ).collect()
else:
# don't use any pattern
_snake_case = 0
_snake_case = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(_lowerCamelCase , '''''' ) , )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
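# Sketch of how this builder is reached in practice (hypothetical DataFrame;
# `Dataset.from_spark` routes a Spark DataFrame through the `Spark` builder):
#
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = datasets.Dataset.from_spark(df)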
| 288
|
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 1
|
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by adding `level` to every pixel value."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''')

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("""image_data/lena_brightness.png""", format="""png""")
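# Note: for 8-bit images `Image.point` evaluates `brightness` over the values
# 0-255 and clamps the results to that range, so `c + level` needs no explicit
# clipping here.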
| 100
|
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, '''rb''') as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class _UpperCAmelCase( lowerCamelCase ):
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(__a)
@staticmethod
def UpperCAmelCase ( __a , __a) -> List[str]:
'''simple docstring'''
def resolved(__a) -> str:
return os.path.realpath(os.path.abspath(__a))
def badpath(__a , __a) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__a , __a)).startswith(__a)
def badlink(__a , __a) -> bool:
# Links are interpreted relative to the directory containing the link
_UpperCamelCase = resolved(os.path.join(__a , os.path.dirname(info.name)))
return badpath(info.linkname , base=__a)
_UpperCamelCase = resolved(__a)
for finfo in members:
if badpath(finfo.name , __a):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''')
elif finfo.issym() and badlink(__a , __a):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''')
elif finfo.islnk() and badlink(__a , __a):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''')
else:
yield finfo
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
os.makedirs(__a , exist_ok=__a)
_UpperCamelCase = tarfile.open(__a)
tar_file.extractall(__a , members=TarExtractor.safemembers(__a , __a))
tar_file.close()
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = [b'\x1F\x8B']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
with gzip.open(__a , '''rb''') as gzip_file:
with open(__a , '''wb''') as extracted_file:
shutil.copyfileobj(__a , __a)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def UpperCAmelCase ( cls , __a , __a = b"") -> bool:
'''simple docstring'''
if super().is_extractable(__a , magic_number=__a):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__a , '''rb''') as fp:
_UpperCamelCase = _EndRecData(__a)
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_UpperCamelCase = fp.read(__a) # CD is where we expect it to be
if len(__a) == sizeCentralDir:
_UpperCamelCase = struct.unpack(__a , __a) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
os.makedirs(__a , exist_ok=__a)
with zipfile.ZipFile(__a , '''r''') as zip_file:
zip_file.extractall(__a)
zip_file.close()
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
with lzma.open(__a) as compressed_file:
with open(__a , '''wb''') as extracted_file:
shutil.copyfileobj(__a , __a)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''')
import rarfile
os.makedirs(__a , exist_ok=__a)
_UpperCamelCase = rarfile.RarFile(__a)
rf.extractall(__a)
rf.close()
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = [b'\x28\xb5\x2F\xFD']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''')
import zstandard as zstd
_UpperCamelCase = zstd.ZstdDecompressor()
with open(__a , '''rb''') as ifh, open(__a , '''wb''') as ofh:
dctx.copy_stream(__a , __a)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = [b'\x42\x5A\x68']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
with bza.open(__a , '''rb''') as compressed_file:
with open(__a , '''wb''') as extracted_file:
shutil.copyfileobj(__a , __a)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''')
import pyazr
os.makedirs(__a , exist_ok=__a)
with pyazr.SevenZipFile(__a , '''r''') as archive:
archive.extractall(__a)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = [b'\x04\x22\x4D\x18']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''')
import lza.frame
with lza.frame.open(__a , '''rb''') as compressed_file:
with open(__a , '''wb''') as extracted_file:
shutil.copyfileobj(__a , __a)
class _UpperCAmelCase:
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
lowercase__ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCAmelCase ( cls) -> Any:
'''simple docstring'''
return max(
len(__a)
for extractor in cls.extractors.values()
if issubclass(__a , __a)
for extractor_magic_number in extractor.magic_numbers)
@staticmethod
def UpperCAmelCase ( __a , __a) -> List[str]:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(__a , magic_number_length=__a)
except OSError:
return b""
@classmethod
def UpperCAmelCase ( cls , __a , __a = False) -> bool:
'''simple docstring'''
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=__a , )
_UpperCamelCase = cls.infer_extractor_format(__a)
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase ( cls , __a) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
_UpperCamelCase = cls._get_magic_number_max_length()
_UpperCamelCase = cls._read_magic_number(__a , __a)
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__a , magic_number=__a):
return extractor_format
@classmethod
def UpperCAmelCase ( cls , __a , __a , __a = None , __a = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(__a) , exist_ok=__a)
# Prevent parallel extractions
_UpperCamelCase = str(Path(__a).with_suffix('''.lock'''))
with FileLock(__a):
shutil.rmtree(__a , ignore_errors=__a)
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__a , __a): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=__a , )
_UpperCamelCase = extractor if extractor != '''deprecated''' else extractor_format
else:
_UpperCamelCase = cls.extractors[extractor_format]
return extractor.extract(__a , __a)
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=__a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__a):
return extractor.extract(__a , __a)
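# --- Added usage sketch (editor's addition, not from the original file) ---
# Typical flow with the classes above; the archive path is hypothetical:
#
#   archive_path = "/tmp/data.tar.gz"
#   fmt = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip"
#   if fmt:
#       Extractor.extract(archive_path, "/tmp/extracted", extractor_format=fmt)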
| 100
| 1
|
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
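# --- Added examples (editor's sketch, not from the original file) ---
# Quick sanity checks for the helpers above, verified by hand:
if __name__ == "__main__":
    assert camelcase_to_snakecase("SomeDatasetName") == "some_dataset_name"
    assert snakecase_to_camelcase("some_dataset_name") == "SomeDatasetName"
    assert filename_prefix_for_split("Squad", "train") == "squad-train"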
| 57
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 140
| 0
|
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : Subreddit to query
    limit : Number of posts to fetch
    age : ["new", "top", "hot"]
    wanted_data : Get only the required data in the list
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 366
|
"""simple docstring"""
def remove_digit(num: int) -> int:
    """
    Returns the biggest possible result that can be achieved by
    removing one digit from the given number.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for _ in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(transposition)) for transposition in num_transpositions
        )
if __name__ == "__main__":
    __import__("doctest").testmod()
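# --- Added examples (editor's sketch, not from the original file) ---
# A couple of worked cases for remove_digit, verified by hand:
if __name__ == "__main__":
    assert remove_digit(152) == 52  # candidates: 52, 12, 15
    assert remove_digit(6385) == 685  # candidates: 385, 685, 635, 638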
| 32
| 0
|
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines
    the cumulative product of (1-beta) over time from t = [0,1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
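# --- Added usage sketch (editor's addition, not from the original file) ---
# A bare denoising loop with the scheduler above; `unet` is a hypothetical
# model callable, not defined in this file:
#
#   scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(25, device="cpu")
#   sample = torch.randn(1, 3, 64, 64)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)  # hypothetical model
#       sample = scheduler.step(model_output, t, sample).prev_sample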
| 1
|
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """
    Convert between energy units by going through the joule as the base unit.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
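# --- Added examples (editor's sketch, not from the original file) ---
# Conversions compose through the joule base unit; verified by hand:
if __name__ == "__main__":
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
    assert energy_conversion("joule", "kilojoule", 2_000) == 2.0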
| 59
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 357
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Given the root of a binary tree with n nodes, where each node holds
    node.data coins and there are n coins in total, return the minimum
    number of moves needed so that every node holds exactly one coin
    (a move transfers one coin between adjacent nodes).
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
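# --- Added example (editor's sketch, not from the original file) ---
# A root holding 3 coins with two empty children needs 2 moves
# (one coin passed down each edge); verified against the logic above.
if __name__ == "__main__":
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_tree) == 2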
| 93
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    # Wraps an image processor and a tokenizer into a single Donut-style processor.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """
        Convert a (generated) token sequence into an ordered JSON format.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
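# --- Added example (editor's addition, not from the original file) ---
# token2json turns Donut-style tag sequences into nested dicts, e.g.:
#
#   processor.token2json("<s_menu><s_name>Latte</s_name></s_menu>")
#   # -> {"menu": {"name": "Latte"}}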
| 73
| 0
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 371
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 242
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
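# --- Added usage sketch (editor's addition, not from the original file) ---
# The tool is callable through PipelineTool.__call__; the image path is hypothetical:
#
#   tool = ImageSegmentationTool()
#   mask = tool(Image.open("cat.png"), "cat")  # binary PIL mask for "cat"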
| 100
|
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
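# --- Added usage sketch (editor's addition, not from the original file) ---
# The pipeline above is normally reached through the high-level factory:
#
#   from transformers import pipeline
#   fill_mask = pipeline("fill-mask", model="distilroberta-base")
#   fill_mask("The capital of France is <mask>.", top_k=3)
#   # -> list of {"score", "token", "token_str", "sequence"} dicts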
| 100
| 1
|
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide `number_of_bytes` into `partitions` inclusive byte ranges,
    e.g. for a multi-connection download.
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
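# --- Added example (editor's sketch, not from the original file) ---
# Splitting 100 bytes into 4 partitions yields four inclusive 25-byte ranges:
if __name__ == "__main__":
    assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]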
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
snake_case__ : Optional[Any] = logging.getLogger(__name__)
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Tuple = git.Repo(search_parent_directories=_snake_case )
lowerCAmelCase : Optional[int] = {
'''repo_id''': str(_snake_case ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(_snake_case , '''git_log.json''' ) , '''w''' ) as f:
json.dump(_snake_case , _snake_case , indent=4 )
def init_gpu_params(params):
    """
    Handle single- and multi-GPU / multi-node initialization.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """
    Set numpy and torch seeds for reproducibility.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
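if __name__ == "__main__":
    # Minimal smoke test (a sketch; `argparse.Namespace` stands in for the
    # trainer's params object). A CPU-only run takes the `n_gpu <= 0` branch.
    from argparse import Namespace

    params = Namespace(n_gpu=0, local_rank=-1, seed=42)
    init_gpu_params(params)
    assert params.is_master and not params.multi_gpu
    set_seed(params)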
| 314
| 0
|
import math
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads the given file as bytes and returns them as one long string of 0s and 1s.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """
    Decompresses the given bit string using the Lempel-Ziv-Welch algorithm.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # once `index` reaches a power of two, every existing key gains a bit
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes the given bit string to the file, 8 bits per byte, padding the
    final byte with a "1" marker followed by zeros.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """
    Removes the size prefix: everything up to and including the first "1".
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, decompresses it and writes the result to destination.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
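# Usage sketch (file names illustrative): the source file must be the output of
# the matching LZW compressor, whose leading size marker is stripped by
# remove_prefix before decoding:
#
#     python lempel_ziv_decompress.py compressed.lz restored.txt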
| 230
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
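# Worked example (values illustrative): with pad_token_id=1 and
# decoder_start_token_id=0, shift_tokens_right(jnp.array([[5, 7, 9]]), 1, 0)
# yields [[0, 5, 7]] -- the sequence moves one slot right, the start token is
# prepended, and any -100 label-masking sentinels are replaced by the pad id.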
class FlaxMTaModel(FlaxTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig
| 32
| 0
|
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sorts `numbers` in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
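# Example (illustrative): exchange_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5].
# The nested loops compare every pair once, so the sort runs in O(n^2) time and
# O(1) extra space, swapping eagerly whenever a smaller element is found.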
| 358
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 68
| 0
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 20
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 3
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__A = logging.get_logger(__name__)
__A = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a VAN model.
    """

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
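# Usage sketch (assumes the restored `VanConfig` name above): the defaults
# mirror the "van-base" layout, and unknown kwargs pass through to
# PretrainedConfig.
#
#     config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])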
| 354
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = hidden_dim
__lowerCamelCase = hidden_dim
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase = self.num_queries
__lowerCamelCase = self.num_labels
__lowerCamelCase = [1, 1, 1, 1]
__lowerCamelCase = self.num_channels
__lowerCamelCase = 64
__lowerCamelCase = 128
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
return config
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
__lowerCamelCase = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
__lowerCamelCase = self.model_tester.get_config()
__lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors='pt', )
__lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 348
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
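# Worked example (key string illustrative): with expert_idx=7 the fairseq key
#     "decoder.layers.3.moe_layer.experts.0.fc1.weight"
# becomes
#     "decoder.layers.3.ffn.experts.expert_7.fc1.weight";
# the fc1/fc2 rewrites are skipped here because "experts" remains in the key.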
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 209
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # The California housing dataset is loaded from scikit-learn
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error   : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 242
| 0
|
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
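# Worked example (verified by hand): padding_tensor([[1, 2], [3]], -100, "right", 3)
# returns [[1, 2, -100], [3, -100, -100]] -- each sequence is copied into its
# row of the fill tensor and the remaining slots keep the padding value.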
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads the inputs received, as well as the
    labels, NER tags and original entity spans.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here: the labels are not yet all the same length.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 56
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
| 1
|
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    """Time step embedding module: linear -> silu -> linear."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps the sinusoidal time step embeddings defined above."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
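# Shape sketch (illustrative values): for two timesteps and an even embedding
# dimension of 32,
#     get_sinusoidal_embeddings(jnp.array([0, 10]), 32)
# returns a (2, 32) array -- 16 sine channels concatenated with 16 cosine
# channels per timestep.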
| 265
|
def reverse_words(input_str: str) -> str:
    """
    Reverses the word order in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
| 0
|
"""simple docstring"""
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
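# Sanity check (computed by hand, up to floating-point accumulation inside
# make_points): for f(x) = x**2 on [0, 1] with h = 0.1 the trapezoidal sum is
# approximately 0.335, close to the exact integral 1/3.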
| 358
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 )
parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 )
parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 )
parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' )
return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        # also report metrics on the training set at each evaluation
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 326
| 0
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        # collect every method marked with a key code into the handler table
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
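# Usage sketch (class and key binding illustrative): the metaclass collects
# methods marked with key codes into `key_handler`, so a menu class can react
# to key presses:
#
#     @register
#     class Menu:
#         @mark("j")
#         def move_down(cls):
#             print("down")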
| 242
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int ) -> int:
'''simple docstring'''
A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() )
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = CLIPConfig
__lowerCamelCase = ['CLIPEncoderLayer']
def __init__( self , lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowercase )
A__ = CLIPVisionModel(config.vision_config )
A__ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowercase )
A__ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowercase )
A__ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowercase )
A__ = nn.Parameter(torch.ones(17 ) , requires_grad=lowercase )
A__ = nn.Parameter(torch.ones(3 ) , requires_grad=lowercase )
@torch.no_grad()
def UpperCamelCase ( self , lowercase , lowercase ) -> Any:
'''simple docstring'''
A__ = self.vision_model(lowercase )[1] # pooled_output
A__ = self.visual_projection(lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ = cosine_distance(lowercase , self.special_care_embeds ).cpu().float().numpy()
A__ = cosine_distance(lowercase , self.concept_embeds ).cpu().float().numpy()
A__ = []
A__ = image_embeds.shape[0]
for i in range(lowercase ):
A__ = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
A__ = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ = special_cos_dist[i][concept_idx]
A__ = self.special_care_embeds_weights[concept_idx].item()
A__ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
A__ = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ = cos_dist[i][concept_idx]
A__ = self.concept_embeds_weights[concept_idx].item()
A__ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowercase )
result.append(lowercase )
A__ = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCamelCase ( self , lowercase , lowercase ) -> Any:
'''simple docstring'''
A__ = self.vision_model(lowercase )[1] # pooled_output
A__ = self.visual_projection(lowercase )
A__ = cosine_distance(lowercase , self.special_care_embeds )
A__ = cosine_distance(lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ = 0.0
A__ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ = torch.any(special_scores > 0 , dim=1 )
A__ = special_care * 0.01
A__ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 68
| 0
|
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """
    Validate the initial digits of a given credit card number.
    """
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """
    Perform Luhn-algorithm validation for a given credit card number.
    """
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3)
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """
    Validate the given credit card number, printing the reason on failure.
    """
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
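# A quick worked example of the doubling rule above (sanity check, illustrative only):
# "59": from the right, 9 is kept as-is; 5 doubles to 10, whose digit sum is 1.
# total = 9 + 1 = 10, and 10 % 10 == 0, so the checksum passes.
assert luhn_validation("59") is True
assert luhn_validation("58") is False  # 8 + 1 = 9, not divisible by 10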
| 6
|
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how often each total occurs when rolling `dice_number` dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    return round(peter_win_probability, ndigits=7)
if __name__ == "__main__":
print(F"""{solution() = }""")
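# The same probability can be cross-checked with Counter, which makes the
# convolution over outcomes explicit (equivalent brute force, illustrative only):
from collections import Counter
from itertools import product

peter = Counter(sum(roll) for roll in product(range(1, 5), repeat=9))
colin = Counter(sum(roll) for roll in product(range(1, 7), repeat=6))
wins = sum(
    p_freq * sum(c_freq for c_total, c_freq in colin.items() if c_total < p_total)
    for p_total, p_freq in peter.items()
)
print(round(wins / (4**9 * 6**6), 7))  # matches solution()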
| 6
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
| 4
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __snake_case :
__lowerCamelCase : str = BlenderbotConfig
__lowerCamelCase : Optional[Any] = {}
__lowerCamelCase : Optional[int] = """gelu"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =parent
UpperCAmelCase : Optional[int] =batch_size
UpperCAmelCase : Dict =seq_length
UpperCAmelCase : Optional[Any] =is_training
UpperCAmelCase : List[str] =use_labels
UpperCAmelCase : List[Any] =vocab_size
UpperCAmelCase : Optional[int] =hidden_size
UpperCAmelCase : Tuple =num_hidden_layers
UpperCAmelCase : Any =num_attention_heads
UpperCAmelCase : Optional[int] =intermediate_size
UpperCAmelCase : str =hidden_dropout_prob
UpperCAmelCase : Optional[int] =attention_probs_dropout_prob
UpperCAmelCase : str =max_position_embeddings
UpperCAmelCase : List[Any] =eos_token_id
UpperCAmelCase : Optional[int] =pad_token_id
UpperCAmelCase : Tuple =bos_token_id
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase : List[str] =prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =TFBlenderbotModel(config=snake_case__ ).get_decoder()
UpperCAmelCase : Any =inputs_dict['''input_ids''']
UpperCAmelCase : str =input_ids[:1, :]
UpperCAmelCase : Tuple =inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase : Tuple =inputs_dict['''head_mask''']
UpperCAmelCase : List[Any] =1
# first forward pass
UpperCAmelCase : List[str] =model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
UpperCAmelCase , UpperCAmelCase : str =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
UpperCAmelCase : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase : int =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCAmelCase : str =model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase : List[Any] =output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase : Dict =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowerCamelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase : Dict = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Union[str, Any] = False
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[str] =TFBlenderbotModelTester(self )
UpperCAmelCase : List[Any] =ConfigTester(self , config_class=snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class __snake_case ( unittest.TestCase ):
__lowerCamelCase : List[str] = ["""My friends are cool but they eat too many carbs."""]
__lowerCamelCase : Dict = """facebook/blenderbot-400M-distill"""
@cached_property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int =TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] =self.tokenizer(self.src_text , return_tensors='''tf''' )
UpperCAmelCase : Optional[int] =self.model.generate(
model_inputs.input_ids , )
UpperCAmelCase : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
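# check_decoder_model_past_large_inputs above follows a standard cache-consistency
# pattern: decode the full sequence without cache, decode incrementally with
# past_key_values, and compare the overlapping outputs. A condensed PyTorch sketch of
# the same pattern on a tiny randomly initialized decoder (illustrative, not this
# test suite's code):
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration

config = BlenderbotConfig(
    vocab_size=99, d_model=16, encoder_layers=2, decoder_layers=2,
    encoder_attention_heads=2, decoder_attention_heads=2,
    encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=64,
)
decoder = BlenderbotForConditionalGeneration(config).get_decoder().eval()
input_ids = torch.randint(3, 99, (1, 5))
next_token = torch.randint(3, 99, (1, 1))
with torch.no_grad():
    out = decoder(input_ids, use_cache=True)
    cached = decoder(next_token, past_key_values=out.past_key_values).last_hidden_state
    full = decoder(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state[:, -1:]
torch.testing.assert_close(cached, full, rtol=1e-3, atol=1e-3)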
| 348
| 0
|
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        f = fa + fb
        fa, fb = fb, f
        index += 1
        if len(str(f)) == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
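# For large n the loop can be cross-checked against Binet's formula,
# F(k) ≈ φ**k / √5, which yields the index in closed form (illustrative):
import math

def solution_closed_form(n: int = 1000) -> int:
    # digits(F(k)) = floor(k * log10(phi) - log10(sqrt(5))) + 1
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))

assert solution_closed_form(1000) == 4782  # Project Euler 25 answer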
| 368
|
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F"""{solution() = }""")
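# Brute-force cross-check of the closed forms used above (illustrative):
n = 100
assert solution(n) == sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))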
| 171
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( _lowerCamelCase , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def A_ ( self : Dict ):
super().setUp()
# fmt: off
snake_case_ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase_ ) + '''\n''' )
def A_ ( self : List[str] , **lowercase_ : Optional[Any] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def get_input_output_texts(self, tokenizer):
    input_text = "tester"
    output_text = "tester"
    return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def A_ ( self : Union[str, Any] ):
pass
def A_ ( self : int ):
snake_case_ = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
snake_case_ = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
snake_case_ = tokenizer.encode([special_token] , add_special_tokens=lowercase_ )
self.assertEqual(len(lowercase_ ) , 1 )
snake_case_ = tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
self.assertTrue(special_token not in decoded )
def A_ ( self : Any ):
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
snake_case_ ,snake_case_ = self.get_input_output_texts(lowercase_ )
snake_case_ = tokenizer.tokenize(lowercase_ )
snake_case_ = tokenizer.convert_tokens_to_ids(lowercase_ )
snake_case_ = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertNotEqual(len(lowercase_ ) , 0 )
snake_case_ = tokenizer.decode(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , lowercase_ )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def A_ ( self : Tuple ):
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def A_ ( self : Tuple ):
pass
| 56
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=_lowerCamelCase ):
snake_case_ = ["transformers", "torch", "note_seq"]
def __init__( self : Union[str, Any] , *lowercase_ : Optional[int] , **lowercase_ : int ):
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def A_ ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str ):
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def A_ ( cls : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any] ):
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
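# These placeholders let the package import cleanly without its optional backends;
# the error is deferred until the object is actually used. A simplified sketch of the
# underlying check (not the library's exact implementation):
import importlib.util

def requires_backends_sketch(obj, backends):
    # raise at use time, listing every missing optional dependency
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backends: {', '.join(missing)}")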
| 56
| 1
|
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
a_ = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
)
a_ = 'CIDAS/clipseg-rd64-refined'
a_ = 'image_segmenter'
a_ = CLIPSegForImageSegmentation
a_ = ['image', 'text']
a_ = ['image']
def __init__(self, *args, **kwargs):
    requires_backends(self, ["vision"])
    super().__init__(*args, **kwargs)

def encode(self, image, label):
    return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

def forward(self, inputs):
    with torch.no_grad():
        logits = self.model(**inputs).logits
    return logits

def decode(self, outputs):
    array = outputs.cpu().detach().numpy()
    array[array <= 0] = 0
    array[array > 0] = 1
    return Image.fromarray((array * 255).astype(np.uint8))
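# Usage sketch of the encode -> forward -> decode flow above (file name hypothetical;
# requires the torch and vision extras; the class is ImageSegmentationTool in transformers):
# from PIL import Image
# tool = ImageSegmentationTool()
# mask = tool(Image.open("cat.png"), "cat")  # black-and-white PIL mask for the label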
| 361
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[str] = logging.get_logger(__name__)
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Union[str, Any]=False ):
__lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Dict, lowerCAmelCase_ : int=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ''
else:
__lowerCAmelCase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def a_ ( lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_, lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = dct.pop(lowerCAmelCase_ )
__lowerCAmelCase = val
def a_ ( ):
__lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = ViTConfig()
__lowerCAmelCase = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
__lowerCAmelCase = True
__lowerCAmelCase = int(vit_name[-12:-10] )
__lowerCAmelCase = int(vit_name[-9:-6] )
else:
__lowerCAmelCase = 1000
__lowerCAmelCase = 'huggingface/label-files'
__lowerCAmelCase = 'imagenet-1k-id2label.json'
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
__lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = int(vit_name[-6:-4] )
__lowerCAmelCase = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
__lowerCAmelCase = 192
__lowerCAmelCase = 768
__lowerCAmelCase = 12
__lowerCAmelCase = 3
elif vit_name[9:].startswith('small' ):
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
__lowerCAmelCase = 768
__lowerCAmelCase = 2304
__lowerCAmelCase = 8
__lowerCAmelCase = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif vit_name[4:].startswith('huge' ):
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
# load original model from timm
__lowerCAmelCase = timm.create_model(lowerCAmelCase_, pretrained=lowerCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase_ )
__lowerCAmelCase = create_rename_keys(lowerCAmelCase_, lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
__lowerCAmelCase = ViTModel(lowerCAmelCase_ ).eval()
else:
__lowerCAmelCase = ViTForImageClassification(lowerCAmelCase_ ).eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
__lowerCAmelCase = DeiTImageProcessor(size=config.image_size )
else:
__lowerCAmelCase = ViTImageProcessor(size=config.image_size )
__lowerCAmelCase = image_processor(images=prepare_img(), return_tensors='pt' )
__lowerCAmelCase = encoding['pixel_values']
__lowerCAmelCase = model(lowerCAmelCase_ )
if base_model:
__lowerCAmelCase = timm_model.forward_features(lowerCAmelCase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCAmelCase_, outputs.pooler_output, atol=1E-3 )
else:
__lowerCAmelCase = timm_model(lowerCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_, outputs.logits, atol=1E-3 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
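# read_in_q_k_v above relies on timm storing the attention projections as one fused
# (3 * hidden, hidden) matrix that is sliced into equal thirds for query, key and
# value. A toy shape check (illustrative):
import torch

hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[2 * hidden :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv)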
| 207
| 0
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int=13 , UpperCamelCase__ : Union[str, Any]=30 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : List[str]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : int=10 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : List[str]=0.6 , UpperCamelCase__ : Dict=None , ) -> str:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = mask_ratio
__magic_name__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__magic_name__ = (image_size // patch_size) ** 2
__magic_name__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowercase ( self : Dict ) -> Tuple:
"""simple docstring"""
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _lowercase ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ) -> str:
"""simple docstring"""
__magic_name__ = TFViTMAEModel(config=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
__magic_name__ = TFViTMAEForPreTraining(UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , training=UpperCamelCase__ )
# expected sequence length = num_patches
__magic_name__ = (self.image_size // self.patch_size) ** 2
__magic_name__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__magic_name__ = 1
__magic_name__ = TFViTMAEForPreTraining(UpperCamelCase__ )
__magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ = model(UpperCamelCase__ , training=UpperCamelCase__ )
__magic_name__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.prepare_config_and_inputs()
((__magic_name__) , (__magic_name__) , (__magic_name__)) = config_and_inputs
__magic_name__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
'''simple docstring'''
a__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a__ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = TFViTMAEModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Optional[int] ) -> Any:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def _lowercase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self : List[Any] ) -> str:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
__magic_name__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__magic_name__ = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
__magic_name__ = outputs_dict[0].numpy()
__magic_name__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase__ : int ):
__magic_name__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase__ ):
__magic_name__ = v.numpy()
else:
__magic_name__ = np.array(UpperCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = prepare_numpy_arrays(UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
__magic_name__ = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ = tf.constant(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__magic_name__ = tf_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(UpperCamelCase__ , UpperCamelCase__ ),)
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase__ , """_keras_serializable""" , UpperCamelCase__ )
}
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ = tf.convert_to_tensor(UpperCamelCase__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
__magic_name__ = main_layer_class(UpperCamelCase__ )
__magic_name__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__magic_name__ = tf.keras.Model(UpperCamelCase__ , outputs=main_layer(UpperCamelCase__ ) )
__magic_name__ = model(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = os.path.join(UpperCamelCase__ , """keras_model.h5""" )
model.save(UpperCamelCase__ )
__magic_name__ = tf.keras.models.load_model(
UpperCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase__ , tf.keras.Model )
__magic_name__ = model(UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@slow
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ = outputs.last_hidden_state.numpy()
__magic_name__ = 0
else:
__magic_name__ = outputs.logits.numpy()
__magic_name__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
__magic_name__ = model_class.from_pretrained(UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ = after_outputs["""last_hidden_state"""].numpy()
__magic_name__ = 0
else:
__magic_name__ = after_outputs["""logits"""].numpy()
__magic_name__ = 0
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1E-5 )
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
__magic_name__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase__ )
__magic_name__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__magic_name__ = model_class.from_config(model.config )
__magic_name__ = new_model(UpperCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
__magic_name__ = new_model(UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
@slow
def _lowercase ( self : Any ) -> str:
"""simple docstring"""
__magic_name__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCamelCase__ )
def a__ ( ):
'''simple docstring'''
__magic_name__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__magic_name__ = ViTMAEConfig()
__magic_name__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(1, num_patches) )
# forward pass
__magic_name__ = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
# verify the logits
__magic_name__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
__magic_name__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
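# The tests above pass an explicit `noise` tensor because ViTMAE derives its random
# patch mask from argsort(noise): fixing the noise fixes the mask, which makes
# outputs reproducible across runs and frameworks. A minimal sketch of that masking
# step (hypothetical shapes):
import numpy as np

rng = np.random.default_rng(2)
num_patches = 196
noise = rng.uniform(size=(1, num_patches))
ids_shuffle = np.argsort(noise, axis=1)   # deterministic given the noise
len_keep = int(num_patches * (1 - 0.75))  # keep 25% of patches at mask_ratio=0.75
ids_keep = ids_shuffle[:, :len_keep]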
| 88
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small" )
__snake_case : str = AutoTokenizer.from_pretrained("google/mt5-small" )
__snake_case : List[Any] = tokenizer("Hello there" , return_tensors="np" ).input_ids
__snake_case : int = tokenizer("Hi I am" , return_tensors="np" ).input_ids
__snake_case : Tuple = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
__snake_case : Tuple = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
__snake_case : str = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
__snake_case : Any = -(labels.shape[-1] * loss.item())
__snake_case : List[str] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
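# shift_tokens_right turns labels into decoder inputs by prepending the decoder start
# token and dropping the last position, so each decoder step predicts the matching
# label. Toy illustration (hypothetical ids):
import numpy as np

labels = np.array([[5, 6, 7]])
decoder_start_token_id = 0
shifted = np.concatenate([[[decoder_start_token_id]], labels[:, :-1]], axis=-1)
print(shifted)  # [[0 5 6]]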
| 326
| 0
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """Verification mode: how much to check among the downloaded files and splits."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exception raised during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file is not among the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were not downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exception raised during split verification."""


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split is not among the expected splits."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The recorded split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size (num_bytes) and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
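# Usage sketch (hypothetical URLs and checksum values):
# expected = {"https://host/data.csv": {"num_bytes": 10, "checksum": "ab12"}}
# recorded = {"https://host/data.csv": get_size_checksum_dict("data.csv")}
# verify_checksums(expected, recorded, verification_name="dataset source files")
# -> raises NonMatchingChecksumError on mismatch, logs success otherwise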
| 124
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first- and second-order Shannon entropies of `text`."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and two-character sequences in `text`."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
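# For comparison with the loop above: first-order Shannon entropy, H = -sum(p * log2 p),
# as one compact function over raw character counts (equivalent form, illustrative):
import math
from collections import Counter

def shannon_entropy(text: str) -> float:
    counts = Counter(text)
    total = len(text)
    return -sum((c / total) * math.log2(c / total) for c in counts.values())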
| 124
| 1
|
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling a number results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 6
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
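# Usage sketch: the two "Subsample" stages are derived from key_dim and hidden_sizes.
# config = LevitConfig()
# config.down_ops[0]  # ['Subsample', 16, 8, 4, 2, 2]  (128 // 16 == 8)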
| 6
| 1
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCAmelCase__ = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Handles arguments for zero-shot text classification by turning each candidate label into an NLI hypothesis."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
def __call__( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
'''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
).format(__lowerCAmelCase ) )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : int = [sequences]
_lowerCamelCase : Union[str, Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__lowerCAmelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
def __init__( self : Union[str, Any] , __lowerCAmelCase : str=ZeroShotClassificationArgumentHandler() , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = args_parser
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
for label, ind in self.model.config.label2id.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=TruncationStrategy.ONLY_FIRST , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
_lowerCamelCase : Optional[Any] = self.tokenizer.eos_token
try:
_lowerCamelCase : Any = self.tokenizer(
__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , )
except Exception as e:
if "too short" in str(__lowerCAmelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_lowerCamelCase : Dict = self.tokenizer(
__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def SCREAMING_SNAKE_CASE ( self : str , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if kwargs.get('''multi_class''' , __lowerCAmelCase ) is not None:
_lowerCamelCase : List[Any] = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
_lowerCamelCase : List[Any] = {}
if "candidate_labels" in kwargs:
_lowerCamelCase : Any = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
_lowerCamelCase : Optional[Any] = kwargs['''hypothesis_template''']
_lowerCamelCase : Optional[Any] = {}
if "multi_label" in kwargs:
_lowerCamelCase : Union[str, Any] = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__( self : Any , __lowerCAmelCase : Union[str, List[str]] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : Tuple , ):
"""simple docstring"""
if len(__lowerCAmelCase ) == 0:
pass
elif len(__lowerCAmelCase ) == 1 and "candidate_labels" not in kwargs:
_lowerCamelCase : Tuple = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : int="This example is {}." ):
"""simple docstring"""
_lowerCamelCase : Any = self._args_parser(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(__lowerCAmelCase , __lowerCAmelCase ) ):
_lowerCamelCase : Tuple = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__lowerCAmelCase ) - 1,
**model_input,
}
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = inputs['''candidate_label''']
_lowerCamelCase : List[str] = inputs['''sequence''']
_lowerCamelCase : Any = {k: inputs[k] for k in self.tokenizer.model_input_names}
_lowerCamelCase : str = self.model(**__lowerCAmelCase )
_lowerCamelCase : Tuple = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = [outputs['''candidate_label'''] for outputs in model_outputs]
_lowerCamelCase : Optional[int] = [outputs['''sequence'''] for outputs in model_outputs]
_lowerCamelCase : Any = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
_lowerCamelCase : Optional[int] = logits.shape[0]
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
_lowerCamelCase : List[str] = N // n
_lowerCamelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__lowerCAmelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_lowerCamelCase : Optional[int] = self.entailment_id
_lowerCamelCase : Optional[Any] = -1 if entailment_id == 0 else 0
_lowerCamelCase : List[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
_lowerCamelCase : str = np.exp(__lowerCAmelCase ) / np.exp(__lowerCAmelCase ).sum(-1 , keepdims=__lowerCAmelCase )
_lowerCamelCase : Tuple = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_lowerCamelCase : Any = reshaped_outputs[..., self.entailment_id]
_lowerCamelCase : Tuple = np.exp(__lowerCAmelCase ) / np.exp(__lowerCAmelCase ).sum(-1 , keepdims=__lowerCAmelCase )
_lowerCamelCase : List[Any] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
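# A minimal usage sketch (not from the original file; the checkpoint and labels are
# illustrative — any NLI model whose config maps an "entailment" label works):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I love hiking in the Alps", candidate_labels=["travel", "cooking", "finance"])
#   # -> {"sequence": "...", "labels": ["travel", ...], "scores": [...]}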
| 358
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
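# Note: 256 is the transformer hidden size (d_model) of Conditional DETR, so each fused
# in_proj matrix above has shape (3 * 256, 256) and splits into query/key/value thirds.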
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
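# Example invocation (script and output names are illustrative):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50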
| 175
| 0
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
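# A minimal predictor-corrector sampling sketch (illustrative, not from the original file;
# `score_model` is a hypothetical network returning the score for a sample at a given sigma):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for i, t in enumerate(scheduler.timesteps):
#       sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(score_model(sample, sigma_t), sample).prev_sample
#       sample = scheduler.step_pred(score_model(sample, sigma_t), t, sample).prev_sample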
| 81
|
"""simple docstring"""
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # subset[i][j] is True if a subset of the first i elements can sum to j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 171
| 0
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
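# Example invocation (checkpoint path and save dir are illustrative):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 save_dirs/aeslc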
| 359
|
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """
    >>> binary_search([1, 3, 5, 7], 5)
    True
    >>> binary_search([1, 3, 5, 7], 4)
    False
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(F"""{target} was {not_str}found in {sequence}""")
| 196
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )

        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 6
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
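# Example invocation (paths are illustrative):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpts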
| 207
| 0
|
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    """
    Find the minimum cost traced by all possible paths from top left to bottom right
    in a given matrix.

    >>> minimum_cost_path([[2, 1], [3, 1], [4, 2]])
    6
    >>> minimum_cost_path([[2, 1, 4], [2, 1, 3], [3, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """
    >>> round(rms_speed_of_molecule(300, 0.028), 2)
    516.96
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen gas (N2), molar mass in kg/mol as required by the function
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 97
| 0
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path

    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
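# Example invocation via python-fire (paths are illustrative); positional args map to
# `convert`'s parameters and flags to its keyword arguments:
#   python fp16_conversion.py pytorch_model.bin --save_path pytorch_model_fp16.bin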
| 124
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that collects the marked key handlers of a class.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """
        Finds and calls the matching handler for the pressed character, if any.
        """
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
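# A minimal usage sketch (class and key names are illustrative):
#
#   @register
#   class Menu:
#       @mark("q")
#       def quit(self):
#           return "quit"
#
#   Menu().handle_input()  # blocks for one keypress; runs `quit` when "q" is pressed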
| 124
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_conditional_detr': [
        'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ConditionalDetrConfig',
        'ConditionalDetrOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_conditional_detr'] = [
        'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConditionalDetrForObjectDetection',
        'ConditionalDetrForSegmentation',
        'ConditionalDetrModel',
        'ConditionalDetrPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
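# Note: with this pattern the heavy submodules (e.g. modeling_conditional_detr) are only
# imported on first access to one of the names registered in `_import_structure`.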
| 116
|
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
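# Example (hand-checked, 8-directional connectivity):
#   graph = [[1, 1, 0], [0, 0, 0], [0, 0, 1]]
#   Matrix(3, 3, graph).count_islands()  # -> 2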
| 116
| 1
|
"""simple docstring"""
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
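# Expected output for the graph above (the classic CLRS flow network): 23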
| 264
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _lowercase ( datasets.BuilderConfig ):
lowercase = None
def __lowercase ( lowerCamelCase : "pyspark.sql.DataFrame" , lowerCamelCase : List[int] , ):
import pyspark
def generate_fn():
UpperCamelCase_ : Dict = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
UpperCamelCase_ : Tuple = df_with_partition_id.select('*' ).where(F"part_id = {partition_id}" ).drop('part_id' )
UpperCamelCase_ : Union[str, Any] = partition_df.collect()
UpperCamelCase_ : Any = 0
for row in rows:
yield F"{partition_id}_{row_id}", row.asDict()
row_id += 1
return generate_fn
class _lowercase ( _BaseExamplesIterable ):
def __init__( self : Optional[int] , snake_case : "pyspark.sql.DataFrame" , snake_case : Tuple=None , ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Dict = df
UpperCamelCase_ : int = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCamelCase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Optional[int] ) -> Any:
"""simple docstring"""
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : np.random.Generator ) -> "SparkExamplesIterable":
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(snake_case )
return SparkExamplesIterable(self.df , partition_order=snake_case )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : int , snake_case : int ) -> "SparkExamplesIterable":
"""simple docstring"""
UpperCamelCase_ : Tuple = self.split_shard_indices_by_worker(snake_case , snake_case )
return SparkExamplesIterable(self.df , partition_order=snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
"""simple docstring"""
return len(self.partition_order )
class _lowercase ( datasets.DatasetBuilder ):
lowercase = SparkConfig
def __init__( self : List[Any] , snake_case : "pyspark.sql.DataFrame" , snake_case : str = None , snake_case : str = None , **snake_case : Optional[Any] , ) -> List[str]:
"""simple docstring"""
import pyspark
UpperCamelCase_ : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCamelCase_ : str = df
UpperCamelCase_ : Tuple = working_dir
super().__init__(
cache_dir=snake_case , config_name=str(self.df.semanticHash() ) , **snake_case , )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
"""simple docstring"""
def create_cache_and_write_probe(snake_case : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=snake_case )
UpperCamelCase_ : Tuple = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(snake_case , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCamelCase_ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(snake_case ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : datasets.download.download_manager.DownloadManager ) -> Optional[int]:
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
import pyspark
def get_arrow_batch_size(snake_case : Dict ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
UpperCamelCase_ : List[str] = self.df.count()
UpperCamelCase_ : Union[str, Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCamelCase_ : str = (
self.df.limit(snake_case )
.repartition(1 )
.mapInArrow(snake_case , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCamelCase_ : Optional[int] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCamelCase_ : Optional[Any] = min(snake_case , int(approx_total_size / max_shard_size ) )
UpperCamelCase_ : int = self.df.repartition(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : str , snake_case : str , snake_case : int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
"""simple docstring"""
import pyspark
UpperCamelCase_ : List[Any] = ParquetWriter if file_format == 'parquet' else ArrowWriter
UpperCamelCase_ : List[str] = os.path.join(self._working_dir , os.path.basename(snake_case ) ) if self._working_dir else fpath
UpperCamelCase_ : Union[str, Any] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCamelCase_ : Union[str, Any] = self.config.features
UpperCamelCase_ : Any = self._writer_batch_size
UpperCamelCase_ : Dict = self._fs.storage_options
def write_arrow(snake_case : List[str] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCamelCase_ : Any = pyspark.TaskContext().taskAttemptId()
UpperCamelCase_ : str = next(snake_case , snake_case )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
UpperCamelCase_ : Any = 0
UpperCamelCase_ : Optional[Any] = writer_class(
features=snake_case , path=working_fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , writer_batch_size=snake_case , storage_options=snake_case , embed_local_files=snake_case , )
UpperCamelCase_ : str = pa.Table.from_batches([first_batch] )
writer.write_table(snake_case )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCamelCase_, UpperCamelCase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
UpperCamelCase_ : Union[str, Any] = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , writer_batch_size=snake_case , storage_options=snake_case , embed_local_files=snake_case , )
UpperCamelCase_ : Optional[Any] = pa.Table.from_batches([batch] )
writer.write_table(snake_case )
if writer._num_bytes > 0:
UpperCamelCase_, UpperCamelCase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(snake_case ) ):
UpperCamelCase_ : Dict = os.path.join(os.path.dirname(snake_case ) , os.path.basename(snake_case ) )
shutil.move(snake_case , snake_case )
UpperCamelCase_ : int = (
self.df.mapInArrow(snake_case , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """Write the split as sharded files, then rename the shards to their final pattern."""
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(
                lambda args: _rename_shard(*args)
            ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self, split_generator: "datasets.SplitGenerator"
    ) -> SparkExamplesIterable:
        """Iterate over examples by reading Arrow batches straight from the Spark DataFrame."""
        return SparkExamplesIterable(self.df)
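
# A minimal, self-contained sketch (not part of the builder above) of the
# per-task shard-statistics aggregation that the builder relies on. It assumes
# a local SparkSession; the column names mirror the schema used by write_arrow.
import pyspark.sql.functions as F
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("shard-stats-sketch").getOrCreate()
# One row per written shard: (task_id, num_examples, num_bytes)
shard_rows = [(0, 100, 2048), (0, 80, 1024), (1, 120, 4096)]
df = spark.createDataFrame(shard_rows, "task_id: long, num_examples: long, num_bytes: long")
stats = (
    df.groupBy("task_id")
    .agg(
        F.sum("num_examples").alias("total_num_examples"),
        F.sum("num_bytes").alias("total_num_bytes"),
        F.count("num_bytes").alias("num_shards"),
        F.collect_list("num_examples").alias("shard_lengths"),
    )
    .collect()
)
for row in stats:
    print(row.task_id, row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)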
| 175
| 0
|
"""simple docstring"""
def a_ ( _lowercase , _lowercase ):
_UpperCamelCase : Any = len(_lowercase )
_UpperCamelCase : Tuple = []
for i in range(len(_lowercase ) - pat_len + 1 ):
_UpperCamelCase : int = True
for j in range(_lowercase ):
if s[i + j] != pattern[j]:
_UpperCamelCase : Dict = False
break
if match_found:
position.append(_lowercase )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
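
# For comparison — a hedged sketch, not part of the original snippet — the same
# positions can be cross-checked with the built-in str.find, which has the same
# O(n*m) worst case:
def find_all(s: str, pattern: str) -> list:
    """Hypothetical helper: all (possibly overlapping) start indices of `pattern` in `s`."""
    positions, start = [], 0
    while (idx := s.find(pattern, start)) != -1:
        positions.append(idx)
        start = idx + 1  # advance one char so overlapping matches are also found
    return positions


assert find_all("ABAAABCDBBABCDDEBCABC", "ABC") == naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC")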
| 128
|
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ =get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCamelCase_ =5
UpperCamelCase_ =10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCamelCase : Any = {'''input_ids''': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase,  # the large encoding dict assigned above
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)
    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
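
# A small usage sketch (assuming transformers and sentencepiece are installed)
# exercising the multilingual checkpoint from the test class above outside the
# test harness. The checkpoint name and FR_CODE come from the tests; everything
# else is illustrative.
from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
tokenizer.tgt_lang = "fr"  # prefix tokens now start with the French language code
ids = tokenizer("C'est trop cool").input_ids
print(ids[0] == FR_CODE, tokenizer.decode(ids, skip_special_tokens=True))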
| 128
| 1