| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
a_ : List[str] = ["""image_processor""", """tokenizer"""]
a_ : Dict = """FlavaImageProcessor"""
a_ : List[str] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict , a_ : Any=None , a_ : Optional[Any]=None , **a_ : List[Any] ):
lowerCAmelCase_ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a_ , )
lowerCAmelCase_ : int = kwargs.pop("feature_extractor" )
lowerCAmelCase_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a_ , a_ )
lowerCAmelCase_ : Optional[Any] = self.image_processor
def __call__( self : int , a_ : Any = None , a_ : int = None , a_ : str = True , a_ : str = False , a_ : List[str] = False , a_ : List[str] = None , a_ : Dict = 0 , a_ : Any = None , a_ : Tuple = None , a_ : Optional[Any] = None , a_ : Dict = None , a_ : Tuple = None , a_ : Optional[Any] = False , a_ : List[str] = False , a_ : Any = False , a_ : List[str] = False , a_ : Optional[Any] = True , a_ : str = None , **a_ : List[Any] , ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowerCAmelCase_ : Any = self.tokenizer(
text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
if images is not None:
lowerCAmelCase_ : List[Any] = self.image_processor(
a_ , return_image_mask=a_ , return_codebook_pixels=a_ , return_tensors=a_ , **a_ , )
if text is not None and images is not None:
encoding.update(a_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def lowerCamelCase ( self : str , *a_ : Union[str, Any] , **a_ : List[Any] ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def lowerCamelCase ( self : Union[str, Any] , *a_ : List[str] , **a_ : Any ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Dict = self.tokenizer.model_input_names
lowerCAmelCase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase ( self : int ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
return self.image_processor_class
@property
def lowerCamelCase ( self : Tuple ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
return self.image_processor
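

# Minimal usage sketch (hedged: the checkpoint name and the image file are illustrative
# assumptions, not part of this file):
#
#   from transformers import FlavaProcessor
#   from PIL import Image
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   # text-only or image-only calls also work; passing neither raises a ValueError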
| 241
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: decode a batch back to text and dump it to json."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="Task to fine-tune on."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
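
# Example invocation (hedged: paths and hyperparameters are illustrative, not prescribed
# by this script):
#
#   python finetune.py --data_dir ./cnn_dm --output_dir ./outputs \
#       --model_name_or_path t5-small --learning_rate 3e-5 \
#       --train_batch_size 8 --eval_batch_size 8 --do_train --do_predict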
| 46
| 0
|
'''Bitwise AND on the binary representations of two non-negative integers.'''


def binary_and(a: int, b: int) -> str:
    """
    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
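
# A short demonstration of the positionwise AND above (illustrative values):
if __name__ == "__main__":
    print(binary_and(25, 32))  # 0b000000 (no bits in common)
    print(binary_and(37, 50))  # 0b100000 (only the 32-bit is shared)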
| 9
|
'''Find the day of the week for a given date using Zeller's congruence.'''
import argparse
import datetime


def zeller(date_input: str) -> str:
    """
    >>> zeller('01-31-2010')
    'Your date 01-31-2010, is a Sunday!'
    """
    # Days of the week for the response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths vars
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
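
# Example invocation (illustrative date):
#   python zellers_congruence.py "01-31-2010"
#   -> Your date 01-31-2010, is a Sunday!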
| 9
| 1
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts `sequence` in place between indices `start` and `end` (inclusive).

    >>> seq = [1, 6, 2, 5, 3, 4]
    >>> slowsort(seq)
    >>> seq
    [1, 2, 3, 4, 5, 6]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
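
# A short demonstration (illustrative list; slowsort works in place):
if __name__ == "__main__":
    data = [5, 3, 8, 1, 9, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 5, 8, 9]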
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
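

# Usage sketch for the validation above (hedged: values are illustrative):
#
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
#   LlamaConfig(rope_scaling={"type": "xpos", "factor": 2.0})             # raises ValueError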
| 0
| 1
|
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
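

# Usage sketch (hedged: assumes `bs4` is installed; the HTML string is illustrative):
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   encoding["nodes"]   # [["Hello world"]]
#   encoding["xpaths"]  # [["/html/body/p"]]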
| 350
|
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
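

# Usage sketch (hedged: this builder is normally reached through the public `datasets`
# API rather than instantiated directly; the file path is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})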
| 41
| 0
|
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
_a : int = True
except (ImportError, ModuleNotFoundError):
_a : Optional[Any] = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> str:
re.sub("""<n>""" ,"""""" ,_lowerCamelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_lowerCamelCase ) )
| 44
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 44
| 1
|
'''Stale bot: manages inactive GitHub issues for huggingface/diffusers.'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main() -> None:
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state='closed')
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state='open')
            issue.remove_from_labels('stale')
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.' )
            issue.add_to_labels('stale')
if __name__ == "__main__":
main()
| 352
|
'''Project Euler Problem 85: find the grid area whose rectangle count is closest to the target.'''
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332
| 0
|
'''Project Euler Problem 14: longest Collatz chain for a starting number under one million.'''


def solution(limit: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 297
|
'''Lazy import structure for the M2M100 model.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
| 0
|
def binary_and(a: int, b: int) -> str:
    """
    >>> binary_and(25, 32)
    '0b000000'
    """
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1'''))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361
|
def multiplicative_persistence(num: int) -> int:
    """
    Return the number of times `num` must be replaced by the product of its digits
    until it reaches a single digit.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError('''multiplicative_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the number of times `num` must be replaced by the sum of its digits
    until it reaches a single digit.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError('''additive_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
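
# A short demonstration (illustrative values):
if __name__ == "__main__":
    print(multiplicative_persistence(217))  # 2: 2*1*7=14, then 1*4=4
    print(additive_persistence(199))        # 3: 1+9+9=19, 1+9=10, 1+0=1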
| 218
| 0
|
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')
    a_binary: str = str(bin(a))[2:]  # remove the leading "0b"
    b_binary: str = str(bin(b))[2:]  # remove the leading "0b"
    max_len: int = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1'''))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=self.path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
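

# Usage sketch (hedged: readers like this are normally reached through the public
# `datasets` API; the file path is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "train.txt"})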
| 9
| 1
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor while the block runs."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
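

# A short demonstration (hedged: uses the names defined above; the sleep is illustrative):
if __name__ == "__main__":
    import time

    with hide():  # cursor hidden while the block runs, restored on exit
        time.sleep(1.5)
    print("cursor restored")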
| 180
|
from manim import *
class BigModelInference(Scene):
    def construct(self):
a = Rectangle(height=0.5 , width=0.5 )
a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
a = Rectangle(height=0.2_5 , width=0.2_5 )
a = [mem.copy() for i in range(6 )]
a = [mem.copy() for i in range(6 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(A , A ).arrange(A , buff=0 )
a = Text("CPU" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A )
a = [mem.copy() for i in range(4 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = Text("GPU" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
gpu.move_to([-1, -1, 0] )
self.add(A )
a = [mem.copy() for i in range(6 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = Text("Model" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
model.move_to([3, -1.0, 0] )
self.add(A )
a = []
a = []
for i, rect in enumerate(A ):
a = fill.copy().set_fill(A , opacity=0.8 )
target.move_to(A )
model_arr.append(A )
a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(A , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A )
self.add(*A , *A )
a = [meta_mem.copy() for i in range(6 )]
a = [meta_mem.copy() for i in range(6 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(A , A ).arrange(A , buff=0 )
a = Text("Disk" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
disk.move_to([-4, -1.2_5, 0] )
self.add(A , A )
a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A , A )
a = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(A , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A )
a = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A ) )
a = Square(0.3 )
input.set_fill(A , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A , buff=0.5 )
self.play(Write(A ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A , buff=0.0_2 )
self.play(MoveToTarget(A ) )
self.play(FadeOut(A ) )
a = Arrow(start=A , end=A , color=A , buff=0.5 )
a.next_to(model_arr[0].get_left() , A , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A , run_time=3 ) )
a = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
self.play(
Write(A ) , Circumscribe(model_arr[0] , color=A , **A ) , Circumscribe(model_cpu_arr[0] , color=A , **A ) , Circumscribe(gpu_rect[0] , color=A , **A ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
a = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , A , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
a = AnimationGroup(
FadeOut(A , run_time=0.5 ) , MoveToTarget(A , run_time=0.5 ) , FadeIn(A , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a = 0.7
self.play(
Circumscribe(model_arr[i] , **A ) , Circumscribe(cpu_left_col_base[i] , **A ) , Circumscribe(cpu_left_col_base[i + 1] , color=A , **A ) , Circumscribe(gpu_rect[0] , color=A , **A ) , Circumscribe(model_arr[i + 1] , color=A , **A ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A , **A ) , Circumscribe(cpu_left_col_base[-1] , color=A , **A ) , Circumscribe(gpu_rect[0] , color=A , **A ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
a = a_c
a = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(A ) , FadeOut(A , run_time=0.5 ) , )
a = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A , run_time=3 ) , MoveToTarget(A ) )
self.wait()
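# A hedged note on rendering: assuming the scene above is saved as
# `stage_5.py` under the class name `Stage5` used here, Manim Community
# renders it from the shell with, for example:
#
#   manim -pql stage_5.py Stage5
#
# (`-p` previews the output, `-ql` renders at low quality for fast iteration.)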
| 180
| 1
|
"""simple docstring"""
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
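# A short sketch of how `check_min_version` is used at the top of an example
# script; the version string below is illustrative only.
if __name__ == "__main__":
    check_min_version("4.0.0")  # raises ImportError if the installed version is older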
| 249
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
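# A hedged sketch of what the `_LazyModule` indirection buys: importing the
# package is cheap, and the heavy submodule import only happens on first
# attribute access. The import path assumes the usual transformers layout.
#
#   from transformers.models import swiftformer  # fast, nothing heavy loaded yet
#   config_cls = swiftformer.SwiftFormerConfig   # triggers the real import lazily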
| 41
| 0
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Creates train/validation/test `DataLoader`s for one cross-validation fold of GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
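# A self-contained sketch of the fold-ensembling step used in
# `training_function` above: per-fold test logits are stacked, averaged and
# argmax'd to get the final predictions. The tensors are made up for
# illustration only.
def _demo_fold_ensembling(num_folds: int = 3):
    fold_logits = [torch.randn(4, 2) for _ in range(num_folds)]  # 4 examples, 2 classes
    averaged = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds)
    return averaged.argmax(dim=-1)  # shape: (4,)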
| 358
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for the missing one of force, area, or distance (pass that argument as 0)."""
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
lowercase__ : Optional[Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
lowercase__ : str = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
lowercase__ : Tuple = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
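# An illustrative call (values chosen arbitrarily): passing `force=0` asks the
# function to solve for the force given the plate area and separation.
if __name__ == "__main__":
    print(casimir_force(force=0, area=4e-4, distance=1e-6))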
| 121
| 0
|
"""simple docstring"""
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Calculate the fixed monthly payment (EMI) needed to repay a loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
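# A worked example with illustrative numbers: 25,000 borrowed at 12% per annum
# over 3 years gives a monthly rate of 0.01 and 36 payments, so the EMI comes
# out to roughly 830.36.
if __name__ == "__main__":
    print(equated_monthly_installments(principal=25_000, rate_per_annum=0.12, years_to_repay=3))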
| 91
|
"""simple docstring"""
def counting_sort(collection):
    """Stable counting sort over a collection of integers."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
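# A small illustration of the properties of `counting_sort` above: negative
# values are handled via the `coll_min` offset and duplicates keep their
# relative order (the sort is stable). The input list is made up.
def _demo_counting_sort():
    assert counting_sort([-5, 3, 0, 3, -2]) == [-5, -2, 0, 3, 3]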
| 332
| 0
|
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that the config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 362
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77  # assumption: the bare `77` in the source most plausibly pins the tokenizer length

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
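# A hedged, minimal sketch of running the pipeline exercised above outside a
# test harness; it needs a CUDA GPU and downloads the BAAI/AltDiffusion
# weights. Prompt and output filename are illustrative.
def _demo_alt_diffusion():
    pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion")
    pipe = pipe.to("cuda")
    image = pipe("A painting of a squirrel eating a burger").images[0]
    image.save("squirrel.png")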
| 331
| 0
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
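# A short sketch of the migration the warning above asks for; the checkpoint
# name is illustrative.
def _demo_migration():
    # old (emits a FutureWarning):
    #   extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    # new:
    return SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")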
| 94
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
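# A hedged sketch of how this conversion script is typically invoked; the
# script and file names below are placeholders.
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub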
| 218
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 223
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
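# A hedged usage sketch for the builder above through the public
# `load_dataset` API; file names and the `field` value are illustrative.
def _demo_load_json():
    from datasets import load_dataset

    ds_lines = load_dataset("json", data_files="my_records.jsonl")  # one JSON object per line
    ds_field = load_dataset("json", data_files="my_doc.json", field="data")  # records under a field
    return ds_lines, ds_field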
| 223
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually took a training step,
    which avoids over-stepping the scheduler when a step was skipped (e.g. after a gradient overflow in mixed
    precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
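# A minimal sketch of the wrapper above with plain PyTorch stand-ins, outside
# an actual `Accelerator` run; with `step_with_optimizer=False` the wrapper
# simply forwards to the underlying scheduler.
def _demo_accelerated_scheduler():
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
    wrapped = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=False)
    wrapped.step()
    return wrapped.get_last_lr()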
| 180
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput
if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure the additional kwargs are forwarded to the decoder's language model
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both the decoder from the hub and the local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)
    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
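# --- Usage sketch (illustrative, not part of the test suite) ---
# The end-to-end flow these tests exercise, assuming the public
# "hf-internal-testing/processor_with_lm" checkpoint is reachable:
#
#     processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
#     logits = ...  # (sequence_length, vocab_size) numpy array from a Wav2Vec2 CTC model
#     output = processor.decode(logits, output_word_offsets=True)
#     print(output.text, output.word_offsets)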
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
__snake_case = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h, w, scale_factor=8):
    """Round the requested size up to a multiple of `scale_factor**2` and return the latent grid size."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
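# Worked example (illustrative): with the default scale_factor=8, a requested
# 500x500 image is rounded up to the nearest multiple of 64 pixels (512),
# giving a 64x64 latent grid, while 768 divides evenly:
#
#     get_new_h_w(500, 500)  # -> (64, 64)
#     get_new_h_w(768, 768)  # -> (96, 96), i.e. 768 / 8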
class KandinskyPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using Kandinsky."""

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # Kandinsky registers no safety checker, so guard the lookup to avoid an AttributeError.
        if getattr(self, "safety_checker", None) is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


# These two helpers intentionally shadow the builtins within this benchmark script.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
|
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort algorithm in Python.

    :param collection: some mutable ordered collection with comparable items
    :return: the same collection ordered in ascending order

    Examples:
    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        """Merge two already-sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid_index = len(collection) // 2
    return merge(merge_sort(collection[:mid_index]), merge_sort(collection[mid_index:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to"
            " this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
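# --- Usage sketch (illustrative; assumes a LightningModule that logs a
# `val_rouge2` metric and exposes `metrics` / `metrics_save_path`) ---
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
#             get_early_stopping_callback(metric="rouge2", patience=3),
#         ],
#     )
#     trainer.fit(model)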
|
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of all even Fibonacci numbers that do not exceed n
    (Project Euler problem 2).

    >>> solution(10)
    10
    >>> solution(34)
    44
    """
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
|
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
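# Behavior sketch (illustrative): with torch or scipy missing, importing this
# dummy still works, but any use fails fast with an ImportError telling the
# user which backends to install:
#
#     LMSDiscreteScheduler()  # raises ImportError via requires_backends
#     LMSDiscreteScheduler.from_pretrained("some/checkpoint")  # same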
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
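# --- Usage sketch (illustrative) ---
#
#     tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     encoding = tokenizer("Hello world")
#
# Unlike BERT-style tokenizers, XLNet appends its special tokens at the end
# ([...tokens..., <sep>, <cls>]) and pads on the left (padding_side = "left").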
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
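# Lazy-import behavior sketch (illustrative): at runtime,
#
#     from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
#
# only materializes the heavy modeling submodule on first attribute access, and
# the TF/Flax variants resolve only when those backends are installed.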
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # The `len()` of each shard should match the expected one when batches are not split
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(observed) < len(reference):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
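# Illustrative sketch (not part of the original tests): `skip_first_batches` is the hook
# used when resuming training mid-epoch. Assuming `train_dataloader` and `resume_step`
# come from a saved checkpoint, one would write:
#
#     skipped_dataloader = skip_first_batches(train_dataloader, num_batches=resume_step)
#     for batch in skipped_dataloader:
#         ...  # continues from batch `resume_step` of the interrupted epoch
#
# `train_dataloader` and `resume_step` are assumptions for illustration.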
| 364
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        self.assertEqual({"""predicted_depth""": ANY(torch.Tensor), """depth""": ANY(Image.Image)}, outputs)
        import datasets
        dataset = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""", """image""", split="""test""")
        outputs = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
            ], outputs, )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def A__ ( self: List[str] ) -> Any:
pass
@slow
@require_torch
def A__ ( self: int ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = """Intel/dpt-large"""
UpperCAmelCase_ : str = pipeline("""depth-estimation""" ,model=lowerCamelCase_ )
UpperCAmelCase_ : Dict = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
UpperCAmelCase_ : Dict = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) ,2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) ,2.6_6_2 )
@require_torch
def A__ ( self: List[str] ) -> Any:
        # It is highly irregular to have no small tests.
        self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""")
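# Minimal usage sketch mirroring the slow test above (assumes network access and a
# GPU-sized checkpoint; not part of the original file):
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     out["predicted_depth"]  # torch.Tensor of raw depth values
#     out["depth"]            # PIL.Image.Image rendering of the depth map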
| 59
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
if is_vision_available():
import PIL
class a_ ( _lowerCAmelCase ):
__A = ["pixel_values"]
def __init__( self : int , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : bool = True , **lowercase : Dict , ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :Optional[int] = size if size is not None else {"shortest_edge": 224}
lowercase_ :Tuple = get_size_dict(lowercase , default_to_square=lowercase )
lowercase_ :Optional[int] = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowercase_ :List[Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="crop_size" )
lowercase_ :List[str] = do_resize
lowercase_ :Union[str, Any] = size
lowercase_ :List[Any] = resample
lowercase_ :List[str] = do_center_crop
lowercase_ :Union[str, Any] = crop_size
lowercase_ :str = do_rescale
lowercase_ :int = rescale_factor
lowercase_ :Any = do_normalize
lowercase_ :Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase_ :Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase_ :Optional[int] = do_convert_rgb
def lowercase__ ( self : List[str] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Optional[int] , ):
"""simple docstring"""
lowercase_ :Optional[int] = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowercase_ :int = get_resize_output_image_size(lowercase , size=size["shortest_edge"] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def lowercase__ ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : str , ):
"""simple docstring"""
lowercase_ :Union[str, Any] = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["height"], size["width"]) , data_format=lowercase , **lowercase )
def lowercase__ ( self : int , lowercase : np.ndarray , lowercase : Union[int, float] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Optional[Any] , ):
"""simple docstring"""
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def lowercase__ ( self : List[Any] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] , ):
"""simple docstring"""
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def lowercase__ ( self : List[str] , lowercase : ImageInput , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : int = None , lowercase : bool = None , lowercase : float = None , lowercase : bool = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : bool = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ):
"""simple docstring"""
lowercase_ :Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowercase_ :str = size if size is not None else self.size
lowercase_ :int = get_size_dict(lowercase , param_name="size" , default_to_square=lowercase )
lowercase_ :List[str] = resample if resample is not None else self.resample
lowercase_ :Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ :Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase_ :str = get_size_dict(lowercase , param_name="crop_size" , default_to_square=lowercase )
lowercase_ :Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ :Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ :Dict = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ :Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowercase_ :List[Any] = image_std if image_std is not None else self.image_std
lowercase_ :List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase_ :List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase_ :Any = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
lowercase_ :List[Any] = [to_numpy_array(lowercase ) for image in images]
if do_resize:
lowercase_ :Optional[Any] = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
lowercase_ :Optional[Any] = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
lowercase_ :Optional[int] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
lowercase_ :Any = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
lowercase_ :Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
lowercase_ :Optional[Any] = {"pixel_values": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
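# Processing-order note (restates what `preprocess` above already does, adds no behavior):
#   convert_to_rgb -> to_numpy_array -> resize(shortest_edge) -> center_crop(height x width)
#   -> rescale(1/255) -> normalize(OPENAI_CLIP_MEAN / OPENAI_CLIP_STD) -> to_channel_dimension_format
# so a single PIL image with the default settings comes out as pixel_values of shape (1, 3, 224, 224).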
| 223
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( _lowerCAmelCase , unittest.TestCase ):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
def lowercase__ ( self : Dict ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def lowercase__ ( self : str , **lowercase : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def lowercase__ ( self : int , **lowercase : int ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
def lowercase__ ( self : int ):
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def lowercase__ ( self : Dict ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def lowercase__ ( self : List[str] ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowercase__ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
def lowercase__ ( self : Dict ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase_ :Tuple = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase_ :Optional[Any] = F'{text_of_1_token} {text_of_1_token}'
lowercase_ :int = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
lowercase_ :Optional[int] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
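# Summary of the offsets behavior asserted above (informational comment, not a new test):
# with trim_offsets=True a word-initial token's offset excludes the leading space that the
# byte-level BPE folds into the token ("Ġhello"), while trim_offsets=False keeps the space,
# moving the second token's reported start from len(word) + 1 back to len(word).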
| 223
| 1
|
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    '''simple docstring'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
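# Sanity-check sketch (assumption, not part of the original file): the returned betas
# are chosen so that the cumulative product of (1 - beta) tracks alpha_bar_fn, e.g.
#
#     betas = betas_for_alpha_bar(1000)               # 1-D tensor of length 1000
#     alphas_cumprod = torch.cumprod(1.0 - betas, 0)  # approximately alpha_bar_fn(i / 1000)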
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_zero: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", clip_sample_range: float = 1.0, **kwargs, ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
f' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
f' maximal {self.config.num_train_timesteps} timesteps.' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
self.timesteps += self.config.steps_offset
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, variance_noise: Optional[torch.FloatTensor] = None, return_dict: bool = True, ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
                " `v_prediction`")
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __len__( self : List[str] ):
return self.config.num_train_timesteps
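# Hypothetical inversion sketch (not part of this file): given a trained `unet` and
# clean latents, running this scheduler forward recovers progressively noisier latents:
#
#     inverse_scheduler.set_timesteps(num_inference_steps=50)
#     for t in inverse_scheduler.timesteps:
#         noise_pred = unet(latents, t).sample
#         latents = inverse_scheduler.step(noise_pred, t, latents).prev_sample
#
# `unet`, `latents` and the step count are assumptions for illustration.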
| 365
|
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    '''Sorts a list by alternating forward and backward bubble passes until no swap occurs.'''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
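# Worked example (illustrative, consistent with the function above):
#   cocktail_shaker_sort([4, 5, 2, 1, 2])      ->  [1, 2, 2, 4, 5]
#   cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) ->  [-4, 0, 1, 2, 5, 11]
# Each outer pass bubbles the largest remaining value to the right, then the smallest
# to the left, and the `swapped` flag stops early on an already-sorted list.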
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(F"{cocktail_shaker_sort(unsorted) = }")
| 156
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCAmelCase : Optional[Any] = "pt"
elif is_tf_available():
_lowerCAmelCase : List[str] = "tf"
else:
_lowerCAmelCase : Tuple = "jax"
class _UpperCamelCase ( lowerCAmelCase , unittest.TestCase ):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
def UpperCAmelCase_ ( self :Union[str, Any] ) -> Any:
super().setUp()
        tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer(self):
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def UpperCAmelCase_ ( self :List[str] , **lowerCamelCase :Optional[Any] ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
return output_txt, output_ids
def UpperCAmelCase_ ( self :List[str] ) -> str:
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def UpperCAmelCase_ ( self :Optional[int] ) -> Dict:
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self :List[Any] ) -> List[str]:
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
def UpperCAmelCase_ ( self :List[Any] ) -> int:
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK)
self.assertEqual(32 , targets["input_ids"].shape[1] )
def UpperCAmelCase_ ( self :List[str] ) -> Optional[int]:
# safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
def UpperCAmelCase_ ( self :Dict ) -> List[str]:
        tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f'''<extra_id_{i}>''' for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def UpperCAmelCase_ ( self :Union[str, Any] ) -> int:
        tokenizer = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def UpperCAmelCase_ ( self :Dict ) -> List[str]:
pass
def UpperCAmelCase_ ( self :Union[str, Any] ) -> Tuple:
pass
def UpperCAmelCase_ ( self :Optional[int] ) -> Dict:
pass
def UpperCAmelCase_ ( self :Dict ) -> int:
pass
def UpperCAmelCase_ ( self :int ) -> Tuple:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
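# Behavioral note derived from the assertions above (informational comment): Perceiver
# tokenizes raw UTF-8 bytes, mapping byte b to id b + 6 to leave room for the special
# tokens, with [CLS]=4 and [SEP]=5 wrapping each sequence. For example "U" (byte 85)
# becomes 91, and "€" (bytes 226, 130, 172) becomes 232, 136, 178, which is why
# decoding the lone id 178 yields the replacement character "�".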
| 169
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch, )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
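# Hypothetical invocation (not part of the script): with the flags defined above,
#   accelerate launch this_script.py --with_tracking --project_dir logs
# trains on GLUE MRPC and logs accuracy, f1 and train_loss per epoch to every tracker
# found in the environment (TensorBoard, Weights & Biases, ...).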
| 169
| 1
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A ( unittest.TestCase ):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=10 )
lowerCAmelCase : Any = self._get_tensors(5 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : Dict = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase : List[str] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : Dict = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : Dict = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Any = self._get_tensors(5 )
lowerCAmelCase : int = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def lowercase__ ( self : Optional[int] ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(UpperCAmelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
lowerCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(UpperCAmelCase_ ) , 1 )
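# A hedged sketch of how these criteria are used in practice. The `StopOnToken` class
# below is illustrative, not part of transformers; in the versions this test targets,
# a criterion's __call__ returns a plain bool.
import torch
from transformers.generation import StoppingCriteria, StoppingCriteriaList


class StopOnToken(StoppingCriteria):
    """Stop as soon as every sequence in the batch ends with `stop_token_id`."""

    def __init__(self, stop_token_id: int):
        self.stop_token_id = stop_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return bool((input_ids[:, -1] == self.stop_token_id).all())


# Usage with generate (model/tokenizer omitted):
# model.generate(**inputs, stopping_criteria=StoppingCriteriaList([StopOnToken(50256)]))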
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution (optionally squaring `p` first)."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_ad_tensor(tensor):
    """Print a 2D tensor, one logged line per layer (name kept to match the call sites below)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def __lowercase ( snake_case_ : Dict ,snake_case_ : Optional[Any] ,snake_case_ : Dict ,snake_case_ : Dict=True ,snake_case_ : str=True ,snake_case_ : List[str]=None ,snake_case_ : int=False ) ->str:
'''simple docstring'''
__A , __A : int = model.config.num_hidden_layers, model.config.num_attention_heads
__A : Optional[Any] = torch.zeros(snake_case_ ,snake_case_ ).to(args.device )
__A : Union[str, Any] = torch.zeros(snake_case_ ,snake_case_ ).to(args.device )
if head_mask is None:
__A : Union[str, Any] = torch.ones(snake_case_ ,snake_case_ ).to(args.device )
head_mask.requires_grad_(requires_grad=snake_case_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__A : str = None
__A : str = 0.0
__A : str = 0.0
for step, inputs in enumerate(tqdm(snake_case_ ,desc='''Iteration''' ,disable=args.local_rank not in [-1, 0] ) ):
__A : Optional[int] = tuple(t.to(args.device ) for t in inputs )
((__A) , ) : Any = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__A : Union[str, Any] = model(snake_case_ ,labels=snake_case_ ,head_mask=snake_case_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__A , __A , __A : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(snake_case_ ):
__A : Optional[int] = entropy(attn.detach() ,snake_case_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(snake_case_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__A : str = 2
__A : List[Any] = torch.pow(torch.pow(snake_case_ ,snake_case_ ).sum(-1 ) ,1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
__A : int = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(snake_case_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(snake_case_ )
logger.info('''Head ranked by importance scores''' )
__A : int = torch.zeros(head_importance.numel() ,dtype=torch.long ,device=args.device )
__A : Optional[int] = torch.arange(
head_importance.numel() ,device=args.device )
__A : Union[str, Any] = head_ranks.view_as(snake_case_ )
print_ad_tensor(snake_case_ )
return attn_entropy, head_importance, total_loss
def __lowercase ( snake_case_ : List[Any] ,snake_case_ : Tuple ,snake_case_ : int ) ->Dict:
'''simple docstring'''
__A , __A , __A : str = compute_heads_importance(snake_case_ ,snake_case_ ,snake_case_ ,compute_entropy=snake_case_ )
original_score = 1 / loss  # instead of the downstream score, use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' ,snake_case_ ,original_score * args.masking_threshold )
__A : Union[str, Any] = torch.ones_like(snake_case_ )
__A : Optional[Any] = max(1 ,int(new_head_mask.numel() * args.masking_amount ) )
__A : List[Any] = original_score
while current_score >= original_score * args.masking_threshold:
__A : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__A : Optional[int] = float('''Inf''' )
__A : str = head_importance.view(-1 ).sort()[1]
if len(snake_case_ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
__A : Union[str, Any] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' ,str(current_heads_to_mask.tolist() ) )
__A : Optional[Any] = new_head_mask.view(-1 )
__A : int = 0.0
__A : int = new_head_mask.view_as(snake_case_ )
__A : Dict = new_head_mask.clone().detach()
print_ad_tensor(snake_case_ )
# Compute metric and head importance again
__A , __A , __A : List[Any] = compute_heads_importance(
snake_case_ ,snake_case_ ,snake_case_ ,compute_entropy=snake_case_ ,head_mask=snake_case_ )
__A : List[str] = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' ,snake_case_ ,new_head_mask.sum() ,new_head_mask.sum() / new_head_mask.numel() * 100 ,)
logger.info('''Final head mask''' )
print_ad_tensor(snake_case_ )
np.save(os.path.join(args.output_dir ,'''head_mask.npy''' ) ,head_mask.detach().cpu().numpy() )
return head_mask
def __lowercase ( snake_case_ : List[Any] ,snake_case_ : Any ,snake_case_ : Optional[int] ,snake_case_ : Tuple ) ->str:
'''simple docstring'''
__A : Any = datetime.now()
__A , __A , __A : int = compute_heads_importance(
snake_case_ ,snake_case_ ,snake_case_ ,compute_entropy=snake_case_ ,compute_importance=snake_case_ ,head_mask=snake_case_ )
__A : Optional[Any] = 1 / loss
__A : Tuple = datetime.now() - before_time
__A : int = sum(p.numel() for p in model.parameters() )
__A : int = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(snake_case_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(snake_case_ ,snake_case_ ):
__A : Tuple = [
v,
]
assert sum(len(snake_case_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(snake_case_ )
__A : Any = sum(p.numel() for p in model.parameters() )
__A : Dict = datetime.now()
__A , __A , __A : Tuple = compute_heads_importance(
snake_case_ ,snake_case_ ,snake_case_ ,compute_entropy=snake_case_ ,compute_importance=snake_case_ ,head_mask=snake_case_ ,actually_pruned=snake_case_ ,)
__A : Optional[int] = 1 / loss
__A : Any = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' ,snake_case_ ,snake_case_ ,pruned_num_params / original_num_params * 100 ,)
logger.info('''Pruning: score with masking: %f score with pruning: %f''' ,snake_case_ ,snake_case_ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' ,original_time / new_time * 100 )
save_model(snake_case_ ,args.output_dir )
def __lowercase ( ) ->str:
'''simple docstring'''
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' ,default=snake_case_ ,type=snake_case_ ,required=snake_case_ ,help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' ,)
parser.add_argument(
'''--model_name_or_path''' ,default=snake_case_ ,type=snake_case_ ,required=snake_case_ ,help='''Path to pretrained model or model identifier from huggingface.co/models''' ,)
parser.add_argument(
'''--output_dir''' ,default=snake_case_ ,type=snake_case_ ,required=snake_case_ ,help='''The output directory where the model predictions and checkpoints will be written.''' ,)
# Other parameters
parser.add_argument(
'''--config_name''' ,default='''''' ,type=snake_case_ ,help='''Pretrained config name or path if not the same as model_name_or_path''' ,)
parser.add_argument(
'''--tokenizer_name''' ,default='''''' ,type=snake_case_ ,help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' ,)
parser.add_argument(
'''--cache_dir''' ,default=snake_case_ ,type=snake_case_ ,help='''Where do you want to store the pre-trained models downloaded from s3''' ,)
parser.add_argument(
'''--data_subset''' ,type=snake_case_ ,default=-1 ,help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' ,action='''store_true''' ,help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' ,action='''store_true''' ,help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' ,action='''store_true''' ,help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' ,action='''store_true''' ,help='''Don\'t normalize all importance scores between 0 and 1''' ,)
parser.add_argument(
'''--try_masking''' ,action='''store_true''' ,help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' ,default=0.9 ,type=snake_case_ ,help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' ,)
parser.add_argument(
'''--masking_amount''' ,default=0.1 ,type=snake_case_ ,help='''Amount of heads to mask at each masking step.''' )
parser.add_argument('''--metric_name''' ,default='''acc''' ,type=snake_case_ ,help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' ,default=128 ,type=snake_case_ ,help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) ,)
parser.add_argument('''--batch_size''' ,default=1 ,type=snake_case_ ,help='''Batch size.''' )
parser.add_argument('''--seed''' ,type=snake_case_ ,default=42 )
parser.add_argument('''--local_rank''' ,type=snake_case_ ,default=-1 ,help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' ,action='''store_true''' ,help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' ,type=snake_case_ ,default='''''' ,help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' ,type=snake_case_ ,default='''''' ,help='''Can be used for distant debugging.''' )
__A : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=snake_case_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__A : List[Any] = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
__A : Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__A : List[Any] = torch.device('''cuda''' ,args.local_rank )
__A : Union[str, Any] = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device ,args.n_gpu ,bool(args.local_rank != -1 ) ) )
model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__A : Union[str, Any] = nn.parallel.DistributedDataParallel(
snake_case_ ,device_ids=[args.local_rank] ,output_device=args.local_rank ,find_unused_parameters=snake_case_ )
elif args.n_gpu > 1:
__A : Union[str, Any] = nn.DataParallel(snake_case_ )
# Print/save training arguments
os.makedirs(args.output_dir ,exist_ok=snake_case_ )
torch.save(snake_case_ ,os.path.join(args.output_dir ,'''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' ,snake_case_ )
# Prepare dataset
__A : Tuple = np.concatenate(
[
np.loadtxt(args.data_dir ,dtype=np.intaa ),
] )
__A : str = (torch.from_numpy(snake_case_ ),)
__A : List[str] = TensorDataset(*snake_case_ )
__A : Tuple = RandomSampler(snake_case_ )
__A : int = DataLoader(snake_case_ ,sampler=snake_case_ ,batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(snake_case_ ,snake_case_ ,snake_case_ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__A : List[str] = mask_heads(snake_case_ ,snake_case_ ,snake_case_ )
prune_heads(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
if __name__ == "__main__":
main()
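# The script above drives two real transformers APIs that are easy to miss in the noise:
# a float `head_mask` passed to forward, and `prune_heads`, which physically removes
# attention heads. A minimal, hedged sketch (the token ids are arbitrary):
import torch
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("gpt2")
n_layers, n_heads = model.config.n_layer, model.config.n_head

# Soft masking: zero out head (layer 0, head 1) for a single forward pass.
head_mask = torch.ones(n_layers, n_heads)
head_mask[0, 1] = 0.0
input_ids = torch.tensor([[464, 3290, 318]])
outputs = model(input_ids, head_mask=head_mask)

# Hard pruning: permanently drop the same head (layer index -> list of head indices).
model.prune_heads({0: [1]})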
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
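# A small usage sketch for the config class above. Building a model from a fresh config
# yields randomly initialized weights; the sizes below are arbitrary.
from transformers import RobertaConfig, RobertaModel

config = RobertaConfig(num_hidden_layers=4, num_attention_heads=4, hidden_size=256, intermediate_size=1024)
model = RobertaModel(config)
print(model.config.position_embedding_type)  # "absolute" unless overridden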
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode blocks.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    """Collect the multi-character Chinese words from a list of LTP tokens."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefix '##' to BERT subwords that continue a whole Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """Compute, for every line, the positions of subwords that belong to whole Chinese words."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for token_id in input_ids:
            token = bert_tokenizer._convert_id_to_token(token_id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file to process; same format as the LM training data",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for the LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for the Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save the result")
    args = parser.parse_args()
    main(args)
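# A hedged sketch of how the reference file produced above is typically consumed: in the
# companion whole-word-masking MLM example each training feature carries its ref ids
# (conventionally under a "chinese_ref" key) so DataCollatorForWholeWordMask can mask
# whole words rather than isolated subwords. The sentence and ref ids below are made up.
from transformers import BertTokenizer, DataCollatorForWholeWordMask

wwm_tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
collator = DataCollatorForWholeWordMask(tokenizer=wwm_tokenizer, mlm_probability=0.15)
feature = {"input_ids": wwm_tokenizer("巴黎是法国的首都")["input_ids"], "chinese_ref": [2, 4]}
batch = collator([feature])  # yields masked input_ids plus MLM labels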
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the checkpoint's weights into our RobertaPreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
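# Illustrative invocation of the script above ("convert_script.py" stands in for this
# file's real name; the repo id comes from the argparse help text, and the output
# directory is arbitrary):
#
#   python convert_script.py --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm
#
# Once converted, the checkpoint loads like any other transformers model:
#
#   from transformers import RobertaPreLayerNormForMaskedLM
#   model = RobertaPreLayerNormForMaskedLM.from_pretrained("./roberta-prelayernorm")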
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
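# PipelineTool instances are callable (encode -> forward -> decode), so the class above
# can be used directly; a hedged sketch (this would download the default checkpoint on
# first use, hence left commented here):
#
#   summarizer = TextSummarizationTool()
#   print(summarizer("Sam: I finished the report. Alex: Great, send it over."))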
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
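# The per-residue remapping above is plain integer "gather" indexing; a tiny
# self-contained illustration of the same pattern (toy numbers, not real residues):
import torch

per_type_map = torch.tensor([[0, 2, 1], [1, 0, 2]])  # one index row per residue type
residue_types = torch.tensor([1, 0, 1])              # type of each residue in a chain
per_residue_map = per_type_map[residue_types]        # shape (num_res, 3), one row per residue
assert per_residue_map.tolist() == [[1, 0, 2], [0, 2, 1], [1, 0, 2]]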
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( __snake_case ):
_lowerCamelCase = (DDIMParallelScheduler,)
_lowerCamelCase = (('eta', 0.0), ('num_inference_steps', 50))
def UpperCamelCase ( self , **lowercase_ ):
_snake_case : str = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**lowercase_ )
return config
def UpperCamelCase ( self , **lowercase_ ):
_snake_case : Any = self.scheduler_classes[0]
_snake_case : Dict = self.get_scheduler_config(**lowercase_ )
_snake_case : Dict = scheduler_class(**lowercase_ )
_snake_case : int = 10, 0.0
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : int = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for t in scheduler.timesteps:
_snake_case : Any = model(lowercase_ , lowercase_ )
_snake_case : Optional[int] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase ( self ):
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
_snake_case : str = self.scheduler_classes[0]
_snake_case : Optional[int] = self.get_scheduler_config(steps_offset=1 )
_snake_case : Any = scheduler_class(**lowercase_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def UpperCamelCase ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowercase_ )
def UpperCamelCase ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowercase_ )
def UpperCamelCase ( self ):
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def UpperCamelCase ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase ( self ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowercase_ , num_inference_steps=lowercase_ )
def UpperCamelCase ( self ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowercase_ , eta=lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Tuple = self.scheduler_classes[0]
_snake_case : int = self.get_scheduler_config()
_snake_case : str = scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def UpperCamelCase ( self ):
_snake_case : int = self.scheduler_classes[0]
_snake_case : Optional[Any] = self.get_scheduler_config()
_snake_case : Any = scheduler_class(**lowercase_ )
_snake_case : Optional[Any] = 10, 0.0
scheduler.set_timesteps(lowercase_ )
_snake_case : Optional[int] = self.dummy_model()
_snake_case : Tuple = self.dummy_sample_deter
_snake_case : str = self.dummy_sample_deter + 0.1
_snake_case : int = self.dummy_sample_deter - 0.1
_snake_case : List[Any] = samplea.shape[0]
_snake_case : Dict = torch.stack([samplea, samplea, samplea] , dim=0 )
_snake_case : List[Any] = torch.arange(lowercase_ )[0:3, None].repeat(1 , lowercase_ )
_snake_case : Dict = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_snake_case : int = scheduler.batch_step_no_noise(lowercase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowercase_ )
_snake_case : Optional[int] = torch.sum(torch.abs(lowercase_ ) )
_snake_case : Optional[int] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.full_loop()
_snake_case : Optional[int] = torch.sum(torch.abs(lowercase_ ) )
_snake_case : Optional[Any] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
_snake_case : Optional[int] = torch.sum(torch.abs(lowercase_ ) )
_snake_case : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def UpperCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
_snake_case : str = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
_snake_case : List[str] = torch.sum(torch.abs(lowercase_ ) )
_snake_case : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def UpperCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
_snake_case : Any = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
_snake_case : Optional[int] = torch.sum(torch.abs(lowercase_ ) )
_snake_case : Optional[int] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
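# Two hand-checked cases for the function above: the merged arrays are
# [1, 2, 3] (odd length, median 2) and [1, 2, 3, 4] (even length, median 2.5).
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5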
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC under the given accelerator."""
snake_case_ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case_ = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(__UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=__UpperCAmelCase, max_length=__UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ = datasets.map(
__UpperCAmelCase, batched=__UpperCAmelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(__UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ = 16
elif accelerator.mixed_precision != "no":
snake_case_ = 8
else:
snake_case_ = None
return tokenizer.pad(
__UpperCAmelCase, padding='''longest''', max_length=__UpperCAmelCase, pad_to_multiple_of=__UpperCAmelCase, return_tensors='''pt''', )
# Instantiate dataloaders.
snake_case_ = DataLoader(
tokenized_datasets['''train'''], shuffle=__UpperCAmelCase, collate_fn=__UpperCAmelCase, batch_size=__UpperCAmelCase )
snake_case_ = DataLoader(
tokenized_datasets['''validation'''], shuffle=__UpperCAmelCase, collate_fn=__UpperCAmelCase, batch_size=__UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
    """Train and evaluate, computing metrics correctly in a distributed setting."""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', __UpperCAmelCase ) == "1":
snake_case_ = 2
# Initialize accelerator
snake_case_ = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ = config['''lr''']
snake_case_ = int(config['''num_epochs'''] )
snake_case_ = int(config['''seed'''] )
snake_case_ = int(config['''batch_size'''] )
snake_case_ = evaluate.load('''glue''', '''mrpc''' )
# If the batch size is too big we use gradient accumulation
snake_case_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case_ = batch_size // MAX_GPU_BATCH_SIZE
snake_case_ = MAX_GPU_BATCH_SIZE
set_seed(__UpperCAmelCase )
snake_case_ ,snake_case_ = get_dataloaders(__UpperCAmelCase, __UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=__UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ = AdamW(params=model.parameters(), lr=__UpperCAmelCase )
# Instantiate scheduler
snake_case_ = get_linear_schedule_with_warmup(
optimizer=__UpperCAmelCase, num_warmup_steps=100, num_training_steps=(len(__UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = accelerator.prepare(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# Now we train the model
for epoch in range(__UpperCAmelCase ):
model.train()
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ = model(**__UpperCAmelCase )
snake_case_ = outputs.loss
snake_case_ = loss / gradient_accumulation_steps
accelerator.backward(__UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
snake_case_ = 0
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ = model(**__UpperCAmelCase )
snake_case_ = outputs.logits.argmax(dim=-1 )
snake_case_ ,snake_case_ = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__UpperCAmelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
snake_case_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__UpperCAmelCase, references=__UpperCAmelCase, )
snake_case_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:", __UpperCAmelCase )
def main():
snake_case_ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''', type=__UpperCAmelCase, default=__UpperCAmelCase, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''', )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
snake_case_ = parser.parse_args()
snake_case_ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCAmelCase, __UpperCAmelCase )
if __name__ == "__main__":
main()
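# A minimal single-process sketch of `Accelerator.gather_for_metrics`, the one-liner
# alternative spelled out in the eval loop above; under distributed training it also
# drops the duplicate samples that pad the last uneven batch.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
predictions = torch.tensor([1, 0, 1])
references = torch.tensor([1, 0, 0])
predictions, references = accelerator.gather_for_metrics((predictions, references))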
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : Dict = ['''pixel_values''']
def __init__( self : str , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : bool = True , _snake_case : Optional[str] = None , _snake_case : Optional[str] = "" , **_snake_case : Union[str, Any] , ):
super().__init__(**_snake_case )
__lowercase : Optional[int] = size if size is not None else {'''height''': 224, '''width''': 224}
__lowercase : Optional[int] = get_size_dict(_snake_case )
__lowercase : Optional[int] = do_resize
__lowercase : List[str] = size
__lowercase : Optional[Any] = resample
__lowercase : str = apply_ocr
__lowercase : List[Any] = ocr_lang
__lowercase : Optional[int] = tesseract_config
def snake_case_ ( self : str , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Any , ):
__lowercase : Optional[Any] = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
__lowercase : Dict = (size['''height'''], size['''width'''])
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : int , _snake_case : ImageInput , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = None , _snake_case : bool = None , _snake_case : Optional[str] = None , _snake_case : Optional[str] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : ChannelDimension = ChannelDimension.FIRST , **_snake_case : Optional[int] , ):
__lowercase : str = do_resize if do_resize is not None else self.do_resize
__lowercase : int = size if size is not None else self.size
__lowercase : Dict = get_size_dict(_snake_case )
__lowercase : Union[str, Any] = resample if resample is not None else self.resample
__lowercase : int = apply_ocr if apply_ocr is not None else self.apply_ocr
__lowercase : List[str] = ocr_lang if ocr_lang is not None else self.ocr_lang
__lowercase : Union[str, Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
__lowercase : Union[str, Any] = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
__lowercase : Optional[int] = [to_numpy_array(_snake_case ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
__lowercase : Optional[int] = []
__lowercase : Tuple = []
for image in images:
__lowercase , __lowercase : Dict = apply_tesseract(_snake_case , _snake_case , _snake_case )
words_batch.append(_snake_case )
boxes_batch.append(_snake_case )
if do_resize:
__lowercase : int = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
__lowercase : Tuple = [flip_channel_order(_snake_case ) for image in images]
__lowercase : int = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
__lowercase : Optional[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_snake_case )
if apply_ocr:
__lowercase : str = words_batch
__lowercase : int = boxes_batch
return data
"""simple docstring"""
def lowerCamelCase ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
UpperCAmelCase : List[str] = generate_large_matrix()
UpperCAmelCase : List[Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> None:
'''simple docstring'''
assert all(row == sorted(a__ , reverse=a__ ) for row in grid )
assert all(list(a__ ) == sorted(a__ , reverse=a__ ) for col in zip(*a__ ) )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Tuple = len(a__ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : Any = (left + right) // 2
__UpperCAmelCase : List[Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : str = mid + 1
else:
__UpperCAmelCase : Any = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(a__ )
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : str = len(grid[0] )
for i in range(len(a__ ) ):
__UpperCAmelCase : Optional[Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(a__ ) * len(grid[0] )) - total
def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def lowerCamelCase ( _UpperCamelCase : Any ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
for row in grid:
for i, number in enumerate(a__ ):
if number < 0:
total += len(a__ ) - i
break
return total
def lowerCamelCase ( ) -> None:
'''simple docstring'''
from timeit import timeit
print("""Running benchmarks""" )
__UpperCAmelCase : Optional[int] = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Optional[Any] = timeit(f'''{func}(grid=grid)''' , setup=a__ , number=5_0_0 )
print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
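# A hand-checked example for the three counters above: the first test grid contains
# exactly 8 negative numbers (1 + 1 + 2 + 4 per row).
example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_binary_search(example) == 8
assert count_negatives_brute_force(example) == 8
assert count_negatives_brute_force_with_break(example) == 8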
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = LEDTokenizer
__a = LEDTokenizerFast
__a = True
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
__UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase ) )
def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , UpperCamelCase )
self.assertIn("""attention_mask""" , UpperCamelCase )
self.assertNotIn("""labels""" , UpperCamelCase )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : str = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""]
__UpperCAmelCase : int = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Optional[Any] = inputs["""input_ids"""]
__UpperCAmelCase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""]
__UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase )
__UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]]
__UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase )
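# Note on the expectation above: LED pads `global_attention_mask` with -1
# rather than 0, so after `tokenizer.pad` the shorter sequence's all-zero
# mask gains trailing -1 entries, matching the expected value exactly.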
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Any = """A, <mask> AllenNLP sentence."""
__UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
__UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 320
| 0
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__SCREAMING_SNAKE_CASE : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : str = -1
__SCREAMING_SNAKE_CASE : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__SCREAMING_SNAKE_CASE : Union[str, Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__SCREAMING_SNAKE_CASE : Dict = cs.out[:-1]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __magic_name__( self :List[Any] ) -> str:
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__SCREAMING_SNAKE_CASE : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = -1
__SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Dict = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0] )
__SCREAMING_SNAKE_CASE : Tuple = TextIteratorStreamer(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : str = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
__SCREAMING_SNAKE_CASE : Optional[Any] = Thread(target=model.generate , kwargs=lowerCamelCase_ )
thread.start()
__SCREAMING_SNAKE_CASE : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __magic_name__( self :List[str] ) -> Any:
__SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Dict = -1
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Tuple = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Dict = greedy_ids[:, input_ids.shape[1] :]
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__SCREAMING_SNAKE_CASE : Any = TextStreamer(lowerCamelCase_ , skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__SCREAMING_SNAKE_CASE : Union[str, Any] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __magic_name__( self :Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
__SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = -1
__SCREAMING_SNAKE_CASE : Any = torch.ones((1, 5) , device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__SCREAMING_SNAKE_CASE : Dict = TextStreamer(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ , max_new_tokens=1 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__SCREAMING_SNAKE_CASE : List[str] = cs.out[:-1] # Remove the final "\n"
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer(lowerCamelCase_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __magic_name__( self :Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = -1
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = TextIteratorStreamer(lowerCamelCase_ , timeout=0.001 )
__SCREAMING_SNAKE_CASE : List[str] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
__SCREAMING_SNAKE_CASE : Any = Thread(target=model.generate , kwargs=lowerCamelCase_ )
thread.start()
# The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCamelCase_ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = """"""
for new_text in streamer:
streamer_text += new_text
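# Distilled from the tests above, the typical (non-test) consumption pattern
# for TextIteratorStreamer is roughly (sketch, assuming a loaded `model` and
# `tokenizer` and prepared `input_ids`):
#     streamer = TextIteratorStreamer(tokenizer)
#     Thread(target=model.generate,
#            kwargs={"input_ids": input_ids, "max_new_tokens": 10, "streamer": streamer}).start()
#     for new_text in streamer:
#         ...  # consume text chunks as generation proceeds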
| 9
|
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPTaConfig(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ )
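# Architecture note: this decoder follows a ClipCap-style recipe -- the
# learned `encode_prefix`/`decode_prefix` linear pair maps an external prefix
# embedding (a CLIP feature, per the comment in the generation code below)
# into GPT-2's embedding space, and the projected prefix is concatenated in
# front of the token embeddings in the forward pass that follows.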
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
'''simple docstring'''
return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.int64 , device=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # project back to the CLIP feature space
# Only beam search is supported for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
logits[is_stopped] = -float(np.inf )
logits[is_stopped, 0] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
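# Beam-search bookkeeping above, in brief: `scores` accumulates the summed
# log-probabilities per beam, `seq_lengths` counts tokens generated before a
# beam emits `eos_token_id`, and candidates are ranked by the
# length-normalized average scores_sum / seq_lengths, so longer sequences are
# not penalized merely for contributing more terms to the sum.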
| 323
| 0
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_snake_case : int = "scheduler_config.json"
class a (Enum ):
"""simple docstring"""
FlaxDDIMScheduler = 1
FlaxDDPMScheduler = 2
FlaxPNDMScheduler = 3
FlaxLMSDiscreteScheduler = 4
FlaxDPMSolverMultistepScheduler = 5
@dataclass
class a (BaseOutput ):
"""simple docstring"""
prev_sample: jnp.ndarray
class a :
"""simple docstring"""
config_name = SCHEDULER_CONFIG_NAME
ignore_for_config = ["dtype"]
_compatibles = []
has_compatibles = True
@classmethod
def __snake_case ( cls : List[str] , lowerCamelCase : Dict[str, Any] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : List[str]=False , **lowerCamelCase : Union[str, Any] , ) -> List[str]:
__snake_case , __snake_case : List[str] = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase , subfolder=lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase , )
__snake_case , __snake_case : Dict = cls.from_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase )
if hasattr(lowerCamelCase , "create_state" ) and getattr(lowerCamelCase , "has_state" , lowerCamelCase ):
__snake_case : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __snake_case ( self : Any , lowerCamelCase : Union[str, os.PathLike] , lowerCamelCase : bool = False , **lowerCamelCase : List[Any] ) -> int:
self.save_config(save_directory=lowerCamelCase , push_to_hub=lowerCamelCase , **lowerCamelCase )
@property
def __snake_case ( self : Tuple ) -> List[Any]:
return self._get_compatibles()
@classmethod
def __snake_case ( cls : int ) -> Dict:
__snake_case : Tuple = list(set([cls.__name__] + cls._compatibles ) )
__snake_case : int = importlib.import_module(__name__.split("." )[0] )
__snake_case : Tuple = [
getattr(lowerCamelCase , lowerCamelCase ) for c in compatible_classes_str if hasattr(lowerCamelCase , lowerCamelCase )
]
return compatible_classes
def broadcast_to_shape_from_left( x , shape ):
assert len(shape ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
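# Shape sketch (hypothetical values): a per-sample coefficient of shape (4,)
# gains trailing singleton axes, (4,) -> (4, 1, 1, 1), and then broadcasts
# against a (4, 8, 8, 3) batch:
#     broadcast_to_shape_from_left(jnp.arange(4.0), (4, 8, 8, 3)).shape == (4, 8, 8, 3)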
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.9_9_9 , dtype=jnp.float32 ):
def alpha_bar( time_step ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
betas = []
for i in range(num_diffusion_timesteps ):
ta = i / num_diffusion_timesteps
tb = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(tb ) / alpha_bar(ta ) , max_beta ) )
return jnp.array(betas , dtype=dtype )
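# The construction above is the "squaredcos_cap_v2" (Glide / improved-DDPM
# cosine) schedule: with alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2,
# each beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta),
# so the cumulative product of the alphas tracks the cosine curve while
# max_beta caps the steepest final steps.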
@flax.struct.dataclass
class a :
"""simple docstring"""
alphas: jnp.ndarray
betas: jnp.ndarray
alphas_cumprod: jnp.ndarray
@classmethod
def __snake_case ( cls : Union[str, Any] , lowerCamelCase : int ) -> List[Any]:
__snake_case : Dict = scheduler.config
if config.trained_betas is not None:
__snake_case : Dict = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__snake_case : Optional[int] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case : Optional[Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
__snake_case : Any = 1.0 - betas
__snake_case : int = jnp.cumprod(lowerCamelCase , axis=0 )
return cls(
alphas=lowerCamelCase , betas=lowerCamelCase , alphas_cumprod=lowerCamelCase , )
def get_sqrt_alpha_prod( state , original_samples , noise , timesteps ):
alphas_cumprod = state.alphas_cumprod
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common( state , original_samples , noise , timesteps ):
sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def get_velocity_common( state , sample , noise , timesteps ):
sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
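# In standard DDPM notation, add_noise_common and get_velocity_common compute
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps      (noising)
#     v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0      (velocity target)
# with both per-timestep coefficients broadcast by get_sqrt_alpha_prod.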
| 134
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a (ProcessorMixin ):
"""simple docstring"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "OwlViTImageProcessor"
tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : str , lowerCamelCase : Any=None , lowerCamelCase : Any=None , **lowerCamelCase : Union[str, Any] ) -> List[Any]:
__snake_case : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase , )
__snake_case : List[Any] = kwargs.pop("feature_extractor" )
__snake_case : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase , lowerCamelCase )
def __call__( self : Union[str, Any] , lowerCamelCase : Tuple=None , lowerCamelCase : int=None , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : List[str]="max_length" , lowerCamelCase : Dict="np" , **lowerCamelCase : str ) -> List[Any]:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one of `text`, `query_images` or `images`. All three cannot be none." )
if text is not None:
if isinstance(lowerCamelCase , lowerCamelCase ) or (isinstance(lowerCamelCase , lowerCamelCase ) and not isinstance(text[0] , lowerCamelCase )):
__snake_case : Union[str, Any] = [self.tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )]
elif isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(text[0] , lowerCamelCase ):
__snake_case : Tuple = []
# Maximum number of queries across the batch
__snake_case : str = max([len(lowerCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowerCamelCase ) != max_num_queries:
__snake_case : Dict = t + [" "] * (max_num_queries - len(lowerCamelCase ))
__snake_case : int = self.tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
encodings.append(lowerCamelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__snake_case : Any = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__snake_case : Tuple = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__snake_case : List[Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__snake_case : Any = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__snake_case : int = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__snake_case : int = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__snake_case : int = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__snake_case : Dict = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__snake_case : Any = BatchEncoding()
__snake_case : Tuple = input_ids
__snake_case : int = attention_mask
if query_images is not None:
__snake_case : List[Any] = BatchEncoding()
__snake_case : Union[str, Any] = self.image_processor(
lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase ).pixel_values
__snake_case : str = query_pixel_values
if images is not None:
__snake_case : Optional[int] = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if text is not None and images is not None:
__snake_case : List[str] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__snake_case : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase ) , tensor_type=lowerCamelCase )
def __snake_case ( self : Dict , *lowerCamelCase : List[Any] , **lowerCamelCase : Union[str, Any] ) -> str:
return self.image_processor.post_process(*lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Union[str, Any] , *lowerCamelCase : str , **lowerCamelCase : List[str] ) -> Tuple:
return self.image_processor.post_process_object_detection(*lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Optional[Any] , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Optional[Any] ) -> Any:
return self.image_processor.post_process_image_guided_detection(*lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[int] ) -> str:
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Union[str, Any] , *lowerCamelCase : Tuple , **lowerCamelCase : List[Any] ) -> Tuple:
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
def __snake_case ( self : Any ) -> Dict:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCamelCase , )
return self.image_processor_class
@property
def __snake_case ( self : List[str] ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCamelCase , )
return self.image_processor
| 134
| 1
|
def is_isogram( string : str ):
'''simple docstring'''
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
letters = sorted(string.lower() )
return len(letters ) == len(set(letters ) )
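# Illustrative behaviour (hypothetical calls):
#     is_isogram("Uncopyrightable")  -> True   (every letter occurs once)
#     is_isogram("allowance")        -> False  ("a" and "l" repeat)
#     is_isogram("abc1")             -> raises ValueError (non-alphabetic character)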
if __name__ == "__main__":
UpperCamelCase_ = input('''Enter a string ''').strip()
UpperCamelCase_ = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 345
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def A__ ( self: Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def A__ ( self: Tuple ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : str = self.dummy_uncond_unet
UpperCAmelCase_ : List[Any] = DDIMScheduler()
UpperCAmelCase_ : List[Any] = self.dummy_vq_model
UpperCAmelCase_ : Optional[int] = LDMPipeline(unet=lowerCamelCase_ ,vqvae=lowerCamelCase_ ,scheduler=lowerCamelCase_ )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.manual_seed(0 )
UpperCAmelCase_ : int = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ).images
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=lowerCamelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : str = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
UpperCAmelCase_ : Tuple = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ldm(generator=lowerCamelCase_ ,num_inference_steps=5 ,output_type="""numpy""" ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ : int = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
UpperCAmelCase_ : Union[str, Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 345
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar('''T''')
def get_parent_position( position : int ) -> int:
return (position - 1) // 2
def get_child_left_position( position : int ) -> int:
return (2 * position) + 1
def get_child_right_position( position : int ) -> int:
return (2 * position) + 2
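# Worked example of the indexing above: the entry at heap position 4 has
# parent (4 - 1) // 2 == 1 and children 2 * 4 + 1 == 9 and 2 * 4 + 2 == 10.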
class MinPriorityQueue ( Generic[T] ):
'''simple docstring'''
def __init__( self : Optional[Any] ):
'''simple docstring'''
self.heap = []
self.position_map = {}
self.elements = 0
def __len__( self : str ):
'''simple docstring'''
return self.elements
def __repr__( self : Tuple ):
'''simple docstring'''
return str(self.heap )
def is_empty ( self : Tuple ):
'''simple docstring'''
return self.elements == 0
def push ( self : List[str] , elem : T , weight : int ):
'''simple docstring'''
self.heap.append((elem, weight) )
self.position_map[elem] = self.elements
self.elements += 1
self._bubble_up(elem )
def extract_min ( self : List[str] ):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
elem , _ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
bubble_down_elem , _ = self.heap[0]
self._bubble_down(bubble_down_elem )
return elem
def update_key ( self : Optional[Any] , elem : T , weight : int ):
'''simple docstring'''
position = self.position_map[elem]
self.heap[position] = (elem, weight)
if position > 0:
parent_position = get_parent_position(position )
_ , parent_weight = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(elem )
else:
self._bubble_down(elem )
else:
self._bubble_down(elem )
def _bubble_up ( self : Any , elem : T ):
'''simple docstring'''
curr_pos = self.position_map[elem]
if curr_pos == 0:
return None
parent_position = get_parent_position(curr_pos )
_ , weight = self.heap[curr_pos]
_ , parent_weight = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(parent_position , curr_pos )
return self._bubble_up(elem )
return None
def _bubble_down ( self : Union[str, Any] , elem : T ):
'''simple docstring'''
curr_pos = self.position_map[elem]
_ , weight = self.heap[curr_pos]
child_left_position = get_child_left_position(curr_pos )
child_right_position = get_child_right_position(curr_pos )
if child_left_position < self.elements and child_right_position < self.elements:
_ , child_left_weight = self.heap[child_left_position]
_ , child_right_weight = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(child_right_position , curr_pos )
return self._bubble_down(elem )
if child_left_position < self.elements:
_ , child_left_weight = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(child_left_position , curr_pos )
return self._bubble_down(elem )
else:
return None
if child_right_position < self.elements:
_ , child_right_weight = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(child_right_position , curr_pos )
return self._bubble_down(elem )
return None
def _swap_nodes ( self : List[Any] , nodea_pos : int , nodeb_pos : int ):
'''simple docstring'''
nodea_elem = self.heap[nodea_pos][0]
nodeb_elem = self.heap[nodeb_pos][0]
self.heap[nodea_pos], self.heap[nodeb_pos] = (
self.heap[nodeb_pos],
self.heap[nodea_pos],
)
self.position_map[nodea_elem] = nodeb_pos
self.position_map[nodeb_elem] = nodea_pos
class GraphUndirectedWeighted ( Generic[T] ):
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
self.connections = {}
self.nodes = 0
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return str(self.connections )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.nodes
def add_node ( self : Tuple , node : T ):
'''simple docstring'''
if node not in self.connections:
self.connections[node] = {}
self.nodes += 1
def add_edge ( self : Union[str, Any] , nodea : T , nodeb : T , weight : int ):
'''simple docstring'''
self.add_node(nodea )
self.add_node(nodeb )
self.connections[nodea][nodeb] = weight
self.connections[nodeb][nodea] = weight
def prims_algo( graph : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
dist = {node: maxsize for node in graph.connections}
parent = {node: None for node in graph.connections}
priority_queue = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(node , weight )
if priority_queue.is_empty():
return dist, parent
# initialization
node = priority_queue.extract_min()
dist[node] = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
dist[neighbour] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(neighbour , dist[neighbour] )
parent[neighbour] = node
# running prim's algorithm
while not priority_queue.is_empty():
node = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
dist[neighbour] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(neighbour , dist[neighbour] )
parent[neighbour] = node
return dist, parent
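# Minimal usage sketch (hypothetical edge weights):
#     graph = GraphUndirectedWeighted[str]()
#     graph.add_edge("a", "b", 3)
#     graph.add_edge("b", "c", 10)
#     graph.add_edge("c", "a", 5)
#     dist, parent = prims_algo(graph)
#     # dist == {"a": 0, "b": 3, "c": 5}; parent["b"] == "a"; parent["c"] == "a"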
| 369
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
'''simple docstring'''
model_type = "canine"
def __init__( self : int , lowercase : Optional[int]=768 , lowercase : Tuple=12 , lowercase : Union[str, Any]=12 , lowercase : Optional[int]=3_072 , lowercase : Tuple="gelu" , lowercase : Optional[Any]=0.1 , lowercase : Tuple=0.1 , lowercase : int=16_384 , lowercase : Optional[int]=16 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-12 , lowercase : Optional[Any]=0 , lowercase : Dict=0xE000 , lowercase : Optional[Any]=0xE001 , lowercase : Union[str, Any]=4 , lowercase : str=4 , lowercase : Optional[int]=8 , lowercase : List[str]=16_384 , lowercase : Union[str, Any]=128 , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = type_vocab_size
_snake_case = layer_norm_eps
# Character config:
_snake_case = downsampling_rate
_snake_case = upsampling_kernel_size
_snake_case = num_hash_functions
_snake_case = num_hash_buckets
_snake_case = local_transformer_stride
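# Character-level pipeline implied by the fields above: CANINE hashes raw
# Unicode code points into `num_hash_buckets` embedding buckets using
# `num_hash_functions` hash functions, then shortens the character sequence
# by `downsampling_rate` before the deep transformer stack, with
# `upsampling_kernel_size` governing the reverse (upsampling) convolution.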
| 130
| 0
|
"""simple docstring"""
def _snake_case ( txt : str ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(txt ) )
if txt[a].isalpha()
]
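# Illustrative call (hypothetical input): for "a1c" the comprehension above
# yields ["A1c", "a1C"] -- one variant per alphabetic position, with the
# digit position skipped.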
if __name__ == "__main__":
__import__("doctest").testmod()
| 109
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS : list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS : set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( ciphertext : list[int], key : tuple[int, ...] ) -> str | None:
decoded = ""
for keychar, cipherchar in zip(cycle(key ), ciphertext ):
decodedchar = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(decodedchar )
return decoded
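# Why re-applying the key works: the cipher computes c = p ^ k per character
# and XOR is self-inverse, so c ^ k == (p ^ k) ^ k == p. try_key therefore
# XORs the candidate key back in and rejects any key that ever yields a code
# point outside VALID_INTS (i.e. a character not in VALID_CHARS).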
def filter_valid_chars( ciphertext : list[int] ) -> list[str]:
possibles = []
for key in product(LOWERCASE_INTS, repeat=3 ):
encoded = try_key(ciphertext, key )
if encoded is not None:
possibles.append(encoded )
return possibles
def filter_common_word( possibles : list[str], common_word : str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def a_ ( lowerCAmelCase_ : str = "p059_cipher.txt" ):
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = Path(lowerCAmelCase_ ).parent.joinpath(lowerCAmelCase_ ).read_text(encoding='utf-8' )
__lowerCAmelCase = [int(lowerCAmelCase_ ) for number in data.strip().split(',' )]
__lowerCAmelCase = filter_valid_chars(lowerCAmelCase_ )
for common_word in COMMON_WORDS:
__lowerCAmelCase = filter_common_word(lowerCAmelCase_, lowerCAmelCase_ )
if len(lowerCAmelCase_ ) == 1:
break
__lowerCAmelCase = possibles[0]
return sum(ord(lowerCAmelCase_ ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 284
| 0
|
def UpperCAmelCase_ ( __UpperCAmelCase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
SCREAMING_SNAKE_CASE_ = set()
# Replace all the whitespace in our sentence
SCREAMING_SNAKE_CASE_ = input_str.replace(' ' , '' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__UpperCAmelCase ) == 26
def UpperCAmelCase_ ( __UpperCAmelCase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
SCREAMING_SNAKE_CASE_ = [False] * 26
for char in input_str:
if char.islower():
SCREAMING_SNAKE_CASE_ = True
elif char.isupper():
SCREAMING_SNAKE_CASE_ = True
return all(__UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
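# All three variants agree on simple cases (hypothetical calls):
#     is_pangram()                  -> True  (the default sentence uses every letter)
#     is_pangram_fastest("abcdef")  -> False (only six distinct letters)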
def benchmark( ) -> None:
from timeit import timeit
setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
print(timeit('is_pangram()' , setup=setup ) )
print(timeit('is_pangram_faster()' , setup=setup ) )
print(timeit('is_pangram_fastest()' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 210
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any=7 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : List[str]=18 , _lowerCAmelCase : Any=30 , _lowerCAmelCase : List[Any]=400 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]=[0.5, 0.5, 0.5] , _lowerCAmelCase : Union[str, Any]=[0.5, 0.5, 0.5] , ):
SCREAMING_SNAKE_CASE_ = size if size is not None else {'shortest_edge': 18}
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = min_resolution
SCREAMING_SNAKE_CASE_ = max_resolution
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean
SCREAMING_SNAKE_CASE_ = image_std
def lowerCAmelCase_ ( self : List[Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCamelCase_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
image_processing_class = LevitImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = LevitImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowerCAmelCase_ ( self : Dict ):
pass
def lowerCAmelCase_ ( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase_ ( self : str ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase_ ( self : Tuple ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 210
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE (BaseOutput ):
"""simple docstring"""
frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 63
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
'''simple docstring'''
@staticmethod
def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
pass
def hashimage( image : Image ):
"""simple docstring"""
m = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def mask_to_test_readable( mask : Image ):
"""simple docstring"""
npimg = np.array(mask )
shape = npimg.shape
return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : Any = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
A_ : str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
_a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> int:
image_segmenter = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
outputs = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs['''masks'''] ):
new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = '''facebook/sam-vit-huge'''
        image_segmenter = pipeline('''mask-generation''', model=model_id)
        outputs = image_segmenter(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''', pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks''']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
] , )
| 320
| 0
|
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """simple docstring"""
    a: int = 0
    b: bool = False
    c: float = 3.0
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_kwargs_to_dict(self):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"""a""": 2, """b""": True})
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
    def test_grad_scaler_kwargs(self):
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1_024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="""fp16""", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1_024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2_000)
        self.assertEqual(scaler._enabled, True)
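    # Hedged note: GradScalerKwargs is forwarded verbatim to torch.cuda.amp.GradScaler,
    # so the private attributes probed above (_init_scale, _growth_factor,
    # _backoff_factor, _growth_interval, _enabled) line up with either the handler's
    # fields or the scaler's documented defaults (0.5 backoff, 2000 growth interval).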
@require_multi_gpu
    def test_ddp_kwargs(self):
'''simple docstring'''
        cmd = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 358
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
def hashimage(image: Image) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
    '''simple docstring'''
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        '''simple docstring'''
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
    def test_small_model_tf(self):
        '''simple docstring'''
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        image_segmenter = pipeline("""mask-generation""", model="""facebook/sam-vit-huge""")
        outputs = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        '''simple docstring'''
        model_id = """facebook/sam-vit-huge"""
        image_segmenter = pipeline("""mask-generation""", model=model_id)
        outputs = image_segmenter(
            """http://images.cocodataset.org/val2017/000000039769.jpg""", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , )
| 320
| 0
|
'''simple docstring'''
def triangle_number_generator():
    """simple docstring"""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2
def count_divisors(n):
    """simple docstring"""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
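# Worked example: 28 = 2^2 * 7, so count_divisors(28) returns (2 + 1) * (1 + 1) = 6,
# matching its six divisors 1, 2, 4, 7, 14 and 28. 28 is also the first triangle
# number with more than five divisors, as in the Project Euler 12 statement.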
def solution():
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 134
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    '''simple docstring'''
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""")
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
def __repr__( self : Optional[Any] ) -> str:
'''simple docstring'''
return self.__str__()
    def derivative(self) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: int | float = 0) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_a: object) -> bool:
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
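# Hedged usage sketch: coefficients are stored in increasing order of power, so
# Polynomial(2, [1, 2, 3]) represents 3x^2 + 2x + 1.
#
#   p = Polynomial(2, [1, 2, 3])
#   p.evaluate(2)        # 1 + 2*2 + 3*4 = 17
#   str(p)               # "3x^2 + 2x + 1"
#   str(p.derivative())  # "6x + 2"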
| 134
| 1
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
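# Hedged note: OpenLlamaModelTester follows the standard transformers tester
# pattern; the tiny randomly-initialised config it builds (hidden_size=32, 5
# layers, vocab 99) lets the shape checks above run on CPU in seconds instead of
# loading real pretrained weights.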
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("linear",), ("dynamic",)])
def SCREAMING_SNAKE_CASE__ (self : str , __SCREAMING_SNAKE_CASE : Any):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = ids_tensor([1, 1_0] , config.vocab_size)
A = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
A = OpenLlamaModel(__SCREAMING_SNAKE_CASE)
original_model.to(__SCREAMING_SNAKE_CASE)
original_model.eval()
A = original_model(__SCREAMING_SNAKE_CASE).last_hidden_state
A = original_model(__SCREAMING_SNAKE_CASE).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
A = {"type": scaling_type, "factor": 1_0.0}
A = OpenLlamaModel(__SCREAMING_SNAKE_CASE)
scaled_model.to(__SCREAMING_SNAKE_CASE)
scaled_model.eval()
A = scaled_model(__SCREAMING_SNAKE_CASE).last_hidden_state
A = scaled_model(__SCREAMING_SNAKE_CASE).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-5))
else:
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-5))
| 57
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """simple docstring"""
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_two = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_one, entity_token_two]})
    config.vocab_size += 2
    print(F"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F"""Missing keys {", ".join(missing_keys)}. Expected only missing embeddings.position_ids""")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions") or key.startswith("lm_head"))])}""")
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            F""" {expected_shape}""")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
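# Hedged illustration of the entity_vocab.tsv layout load_entity_vocab expects:
# one tab-separated "<entity title>\t<count>" pair per line; the count column is
# read but discarded, so only the title and its line index are kept, e.g.:
#
#   [MASK]          <count>
#   United_States   <count>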
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 57
| 1
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""])
    repo = g.get_repo("""huggingface/transformers""")
    open_issues = repo.get_issues(state="""open""")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 100
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            } )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6)  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)  # check for doc token related keys in dictionary.
| 130
| 0
|
import heapq
def greedy_min_vertex_cover(graph: dict):
    """simple docstring"""
    queue = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1*len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
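# Doctest-style example, checked by hand against the loop above:
#
#   >>> greedy_min_vertex_cover({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]})
#   {0, 1, 2, 4}
#
# Vertex 2 (max degree, smallest key among the ties) is chosen first; the queue is
# then re-ranked and 0, 1 and 4 follow until no edges remain.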
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 364
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """simple docstring"""
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, )
    decoder_config = MBartConfig(
        is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
            model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True, )
    return encoder_config, decoder_config
def rename_key(name):
    """simple docstring"""
    if "encoder.model" in name:
        name = name.replace('encoder.model', 'encoder')
    if "decoder.model" in name:
        name = name.replace('decoder.model', 'decoder')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if name.startswith('encoder'):
        if "layers" in name:
            name = 'encoder.' + name
        if "attn.proj" in name:
            name = name.replace('attn.proj', 'attention.output.dense')
        if "attn" in name and "mask" not in name:
            name = name.replace('attn', 'attention.self')
        if "norm1" in name:
            name = name.replace('norm1', 'layernorm_before')
        if "norm2" in name:
            name = name.replace('norm2', 'layernorm_after')
        if "mlp.fc1" in name:
            name = name.replace('mlp.fc1', 'intermediate.dense')
        if "mlp.fc2" in name:
            name = name.replace('mlp.fc2', 'output.dense')
        if name == "encoder.norm.weight":
            name = 'encoder.layernorm.weight'
        if name == "encoder.norm.bias":
            name = 'encoder.layernorm.bias'
    return name
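# Hedged example of the mapping above (the input is a hypothetical key from the
# original checkpoint, shown only to illustrate the substitution order):
#
#   rename_key("encoder.model.patch_embed.proj.weight")
#   # -> "encoder.embeddings.patch_embeddings.projection.weight"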
def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # verify results on scanned document
    dataset = load_dataset('hf-internal-testing/example-documents')
    image = dataset['test'][0]['image'].convert('RGB')
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1])
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors='pt').pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        question = 'When is the coffee break?'
        task_prompt = task_prompt.replace('{user_input}', question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '<s_rvlcdip>'
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '<s_cord>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = 's_cord-v2>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '<s_zhtrainticket>'
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = 'hello world'
    else:
        raise ValueError('Model name not supported')
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors='pt')[
        'input_ids'
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model')
        processor.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model')
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
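For reference, a typical way to drive this converter (the script file name is assumed from the transformers conversion-script convention):
# Hypothetical command line:
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa \
#       --push_to_hub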
| 308
| 0
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid = '''isbn/0140328726''' ):
    """simple docstring"""
    new_olid = olid.strip().strip('''/''' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('''/''' ) != 1:
        msg = F"{olid} is not a valid Open Library olid"
        raise ValueError(msg )
    return requests.get(F"https://openlibrary.org/{new_olid}.json" ).json()
def summarize_book(ol_book_data ):
    """simple docstring"""
    desired_keys = {
        '''title''': '''Title''',
        '''publish_date''': '''Publish date''',
        '''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages:''',
        '''first_sentence''': '''First sentence''',
        '''isbn_10''': '''ISBN (10)''',
        '''isbn_13''': '''ISBN (13)''',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ''', '''.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
    isbn = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
    book_summary = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print("""\n""".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 210
|
import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets(screen_name ):
    """simple docstring"""
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"...{len(alltweets )} tweets downloaded so far" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"new_{screen_name}_tweets.csv" , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
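A compressed restatement of the pagination loop above, as a hedged sketch:
# Because Twitter's max_id parameter is inclusive, the loop passes the last
# fetched id minus one so no tweet is returned twice, and it stops once
# user_timeline hands back an empty page. The same cursor pattern in outline:
#   oldest = None
#   while (page := api.user_timeline(screen_name=name, count=200, max_id=oldest)):
#       alltweets.extend(page)
#       oldest = page[-1].id - 1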
| 210
| 1
|
"""simple docstring"""
import random
class Onepad:
    '''simple docstring'''
    @staticmethod
    def encrypt(text: str ) -> tuple[list[int], list[int]]:
        """simple docstring"""
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 300 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int] , key: list[int] ) -> str:
        """simple docstring"""
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c , k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
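A quick check of the arithmetic: encrypt computes c = (p + k) * k for plaintext byte p and random key k, so decrypt's (c - k**2) / k recovers ((p + k) * k - k**2) / k = p exactly. For one concrete value:
p, k = ord("H"), 42
c = (p + k) * k
assert int((c - k ** 2) / k) == p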
| 365
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
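For intuition, the `_LazyModule` replacement of `sys.modules[__name__]` defers the heavy torch/vision imports until an attribute is first touched. A hypothetical simplification of the idea (not the actual transformers implementation):
import importlib
import types

class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        # Import the owning submodule on demand, then cache the attribute.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                value = getattr(module, attr)
                setattr(self, attr, value)
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")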
| 302
| 0
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class BigBirdTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) ->None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , mask_token=mask_token , cls_token=cls_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) ->int:
        return self.sp_model.get_piece_size()
    def get_vocab( self ) ->Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) ->Any:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ) ->None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ) ->List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token: str ) ->int:
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index: int ) ->str:
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) ->str:
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def _decode( self , token_ids: List[int] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , spaces_between_special_tokens: bool = True , **kwargs , ) ->str:
        use_source_tokenizer = kwargs.pop('''use_source_tokenizer''' , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(sub_texts ) )
        else:
            text = ''''''.join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) ->Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) ->List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) ->List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) ->List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
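The special-token layout built above follows the BERT convention. A tiny sketch with made-up ids, purely for illustration:
cls_id, sep_id = 65, 66
ids_a, ids_b = [5, 6], [7]
assert [cls_id] + ids_a + [sep_id] == [65, 5, 6, 66]                            # [CLS] A [SEP]
assert [cls_id] + ids_a + [sep_id] + ids_b + [sep_id] == [65, 5, 6, 66, 7, 66]  # [CLS] A [SEP] B [SEP]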
| 8
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4096 , action_tanh=True , vocab_size=1 , n_positions=1024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
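A minimal usage sketch of this config; the model class name is the one transformers exports alongside it:
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
model = DecisionTransformerModel(config)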
| 320
| 0
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar('_T')
class QueueByTwoStacks(Generic[_T]):
    '''simple docstring'''
    def __init__( self , iterable: Iterable[_T] | None = None ) ->None:
        """simple docstring"""
        self._stack1 = list(iterable or [] )
        self._stack2 = []
    def __len__( self ) ->int:
        """simple docstring"""
        return len(self._stack1 ) + len(self._stack2 )
    def __repr__( self ) ->str:
        """simple docstring"""
        return F"""Queue({tuple(self._stack2[::-1] + self._stack1 )})"""
    def put( self , item: _T ) ->None:
        """simple docstring"""
        self._stack1.append(item )
    def get( self ) ->_T:
        """simple docstring"""
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError("""Queue is empty""" )
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
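Usage sketch for the two-stack queue above: put() pushes onto one stack, get() pops from the other and refills it in reverse order only when it runs empty, so each element is moved between stacks at most once and get() is amortized O(1):
q = QueueByTwoStacks([10, 20])
q.put(30)
assert q.get() == 10 and q.get() == 20 and q.get() == 30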
| 233
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""})
__lowercase: Optional[str] = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""})
__lowercase: Optional[int] = field(default=2 , metadata={"""help""": """Batch size for training."""})
__lowercase: Optional[int] = field(default=2 , metadata={"""help""": """Batch size for evaluation."""})
__lowercase: Optional[float] = field(default=0.1 , metadata={"""help""": """Value of weight decay."""})
__lowercase: Optional[int] = field(
default=1_00_00 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""})
    __lowercase: Optional[float] = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""})
    __lowercase: Optional[str] = field(default="""cosine""" , metadata={"""help""": """Learning rate scheduler type."""})
__lowercase: Optional[int] = field(
default=7_50 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""})
__lowercase: Optional[int] = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""})
__lowercase: Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""})
__lowercase: Optional[int] = field(default=5_00_00 , metadata={"""help""": """Maximum number of training steps."""})
__lowercase: Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""})
__lowercase: Optional[int] = field(default=10_24 , metadata={"""help""": """Sequence lengths used for training."""})
__lowercase: Optional[int] = field(default=1 , metadata={"""help""": """Training seed."""})
__lowercase: Optional[int] = field(
default=10_24 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__lowercase: Optional[str] = field(
default=snake_case__ , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""})
__lowercase: Optional[bool] = field(default=snake_case__ , metadata={"""help""": """If True the data is pretokenized."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""})
__lowercase: Optional[int] = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""})
__lowercase: Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""})
__lowercase: Optional[int] = field(default=10_24 , metadata={"""help""": """Length of sequences to be evaluated."""})
__lowercase: Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""})
__lowercase: Optional[int] = field(default=snake_case__ , metadata={"""help""": """Number of workers used for code evaluation."""})
__lowercase: Optional[int] = field(
default=snake_case__ , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__lowercase: Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """Sample from the language model's output distribution."""})
__lowercase: Optional[float] = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""})
__lowercase: Optional[int] = field(default=2_56 , metadata={"""help""": """Maximum number of newly generated tokens."""})
__lowercase: Optional[int] = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""})
__lowercase: Optional[float] = field(default=0.9_5 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""})
__lowercase: Optional[int] = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""})
__lowercase: Optional[int] = field(
default=2_00 , metadata={"""help""": """Number of completions to generate for each sample."""})
__lowercase: Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""})
    __lowercase: Optional[str] = field(
        default="""eval_results.json""" , metadata={"""help""": """Name of the file to save the evaluation results to."""})
__lowercase: Optional[str] = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""})
__lowercase: Optional[int] = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[int] = field(
default=snake_case__ , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__lowercase: Optional[str] = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""})
__lowercase: Optional[str] = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""})
__lowercase: Optional[int] = field(
default=10_00_00 , metadata={"""help""": """Number of files to save per JSON output file."""})
__lowercase: Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""})
__lowercase: Optional[float] = field(
default=10_00 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=1_00 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=0.2_5 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__lowercase: Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """If True, near-duplicate samples are removed."""})
__lowercase: Optional[float] = field(
default=0.8_5 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""})
__lowercase: Optional[str] = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""})
__lowercase: Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""})
__lowercase: Optional[int] = field(default=20_00_00 , metadata={"""help""": """Number of examples to train tokenizer on."""})
    __lowercase: Optional[int] = field(
        default=3_27_68 , metadata={"""help""": """Vocabulary size of the new tokenizer."""})
__lowercase: Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""})
__lowercase: Optional[bool] = field(default=snake_case__ , metadata={"""help""": """Push saved tokenizer to the hub."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""})
__lowercase: Optional[str] = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""})
__lowercase: Optional[int] = field(default=snake_case__ , metadata={"""help""": """Number of workers used for code evaluation."""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""})
__lowercase: Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""})
__lowercase: Optional[bool] = field(default=snake_case__ , metadata={"""help""": """Push saved tokenizer to the hub."""})
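These dataclasses follow the HfArgumentParser convention used throughout the CodeParrot scripts. A hedged sketch of the parsing pattern (`TrainConfig` is a stand-in name, since the classes above carry placeholder names in this dump):
from transformers import HfArgumentParser

# parser = HfArgumentParser(TrainConfig)   # substitute one of the dataclasses above
# args = parser.parse_args()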
| 233
| 1
|
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
A : Optional[int] = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus ):
    def setUp( self ):
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=True , )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download( self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script( self ):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }
        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
        bash_script = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n            --output_dir {output_dir}\n            --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n            --sortish_sampler\n            --do_predict\n            --gpus 1\n            --freeze_encoder\n            --n_train 40000\n            --n_val 500\n            --n_test 500\n            --fp16_opt_level O1\n            --num_sanity_val_steps 0\n            --eval_beams 2\n        ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys , "argv" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )
        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , float )
        self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.0_1 )
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith(".ckpt" )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location="cpu" )
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus ):
    @timeout_decorator.timeout(6_00 )
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script( self ):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 1_28,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }
        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
        )
        bash_script = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
        bash_script = bash_script.replace("--fp16 " , " " )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16" , "" )
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys , "argv" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.0_1
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , float )
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith(".ckpt" )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location="cpu" )
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"] ) == 1
| 57
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
'''simple docstring'''
pass
| 57
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
    from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'tf_padding' ) )
        self.parent.assertTrue(hasattr(config , 'depth_multiplier' ) )
class MobileNetV2ModelTester:
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=12_80 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        return MobileNetV2Config(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = MobileNetV2Model(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        self.parent.assertEqual(
            result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': MobileNetV2Model,
            '''image-classification''': MobileNetV2ForImageClassification,
            '''image-segmentation''': MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = MobileNetV2ModelTester(self )
        self.config_tester = MobileNetV2ConfigTester(self , config_class=MobileNetV2Config , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason='MobileNetV2 does not output attentions' )
    def test_attention_outputs( self ):
        pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            MobileNetV2ImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = MobileNetV2ForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_01) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_semantic_segmentation( self ):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        model = model.to(torch_device )
        image_processor = MobileNetV2ImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
                [[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
                [[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
| 309
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('./' )
def md_prefix(i: int ) -> str:
    return F'{i * "  "}*' if i else "\n##"
def print_path(old_path: str , new_path: str ) -> str:
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
    return new_path
def print_directory_md(top_dir: str = "." ) -> None:
    old_path = ''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath , filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = F'{filepath}/{filename}'.replace(' ' , '%20' )
        filename = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
        print(F'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 309
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_A = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input() -> Optional[Any]:
    compute_environment = _ask_options(
        """In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""config""" , description=_A )
    else:
        parser = argparse.ArgumentParser("""Accelerate config command""" , description=_A )
    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args ):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith(""".json""" ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f'''accelerate configuration saved at {config_file}''' )
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 122
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    def __init__( self , tokenizer , dataset , seq_length=1_024 , num_of_sequences=1_024 , chars_per_token=3.6 ):
        """simple docstring"""
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        """simple docstring"""
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['''content'''] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
def create_dataloader( args ):
    '''simple docstring'''
    ds_kwargs = {'''streaming''': True}
    valid_data = load_dataset(args.dataset_name , split='''train''' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    '''simple docstring'''
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('''inf''' )
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
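As a reminder of the relation computed in evaluate(): perplexity is the exponential of the mean cross-entropy loss, so a model that spreads probability uniformly over a vocabulary of V tokens has loss ln(V) and perplexity exactly V:
import math

vocab_size = 50257  # GPT-2-sized vocabulary, purely for illustration
uniform_loss = math.log(vocab_size)
assert round(math.exp(uniform_loss)) == vocab_size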
| 308
| 0
|
import torch
from transformers import AutoModel
class FSNERModel( torch.nn.Module ):
    def __init__( self , pretrained_model_name_or_path="sayef/fsner-bert-base-uncased" ):
        '''simple docstring'''
        super(FSNERModel , self ).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self , **inputs ):
        '''simple docstring'''
        return self.bert(**inputs ).last_hidden_state
    def VectorSum( self , token_embeddings ):
        '''simple docstring'''
        return token_embeddings.sum(2 , keepdim=True )
    def Atten( self , q , S , T=1 ):
        '''simple docstring'''
        return self.softmax(T * self.cos(q , S ) )
    def forward( self , W_query , W_supports ):
        '''simple docstring'''
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
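A shape sketch of the scoring step above, with made-up sizes: q[i] is (query_seq_len, hidden) and s_start is (num_start_tokens, hidden), so the matmul gives (query_seq_len, num_start_tokens); .sum(1) pools over support tokens and .softmax(0) normalizes into a start-position distribution over the query sequence:
import torch

q_i = torch.randn(7, 16)      # (query_seq_len, hidden), sizes assumed
s_start = torch.randn(3, 16)  # (num_start_tokens, hidden)
p_start = torch.matmul(q_i, s_start.T).sum(1).softmax(0)
assert p_start.shape == (7,) and torch.isclose(p_start.sum(), torch.tensor(1.0))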
| 308
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        '''simple docstring'''
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """simple docstring"""
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(ckpt['state_dict'])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
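# A hedged usage sketch (script file name, checkpoint and output paths are placeholders):
# python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#     --pytorch_dump_folder_path ./longformer-qa
# The converted folder can then be reloaded with
# LongformerForQuestionAnswering.from_pretrained("./longformer-qa").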
| 308
| 1
|
"""simple docstring"""
def count_divisors(n):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
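# A quick sanity check (my addition): compare the factorization-based divisor
# count with a brute-force count on a small number.
def demo_brute_force_divisors(n: int) -> int:
    return sum(1 for d in range(1, n + 1) if n % d == 0)

assert count_divisors(28) == demo_brute_force_divisors(28) == 6  # 1, 2, 4, 7, 14, 28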
| 74
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: bool = True, num_time_features: int = 0, num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, activation_function: str = "gelu", dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache: bool = True, is_encoder_decoder=True, label_length: int = 10, moving_average: int = 25, autocorrelation_factor: int = 3, **kwargs, ):
'''simple docstring'''
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        '''simple docstring'''
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
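# A minimal usage sketch (my addition): only the time-series lengths need to be
# set; everything else falls back to the defaults above.
demo_config = AutoformerConfig(prediction_length=24, context_length=48)
print(demo_config.d_model, demo_config.lags_sequence)  # 64 [1, 2, 3, 4, 5, 6, 7]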
| 302
| 0
|
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
# Constants per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """simple docstring"""
    # Compute flattening-corrected latitudes and the longitude difference.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
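# A usage example (my addition; coordinates are approximate):
demo_san_francisco = (37.774856, -122.424227)
demo_yosemite = (37.864742, -119.537521)
print(f"{haversine_distance(*demo_san_francisco, *demo_yosemite) / 1000:.1f} km")  # ~254 km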
| 244
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
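# A cross-check (my addition): circular convolution computed via the FFT should
# match the matrix construction above up to rounding.
import numpy as np

demo_a = np.array([2, 1, 2, -1])
demo_b = np.array([1, 2, 3, 4])
demo_fft = np.real(np.fft.ifft(np.fft.fft(demo_a) * np.fft.fft(demo_b)))
print(np.round(demo_fft, 2))  # [10. 10.  6. 14.] -- same as circular_convolution()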
| 244
| 1
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase : List[Any] = 16
lowerCamelCase : Optional[int] = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", )
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
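# A hedged launch sketch (script and config file names are placeholders):
# accelerate launch --config_file deepspeed_config.yaml this_script.py \
#     --model_name_or_path bert-base-cased --num_epochs 2 \
#     --performance_lower_bound 0.80 --output_dir ./results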
| 233
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    '''simple docstring'''
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """simple docstring"""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """simple docstring"""
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        """simple docstring"""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
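# A hedged usage sketch (the image path is a placeholder; the first call
# downloads the Donut checkpoint named in default_checkpoint above):
# tool = DocumentQuestionAnsweringTool()
# tool.setup()
# answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")
# print(answer)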
| 233
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''')


class VivitImageProcessor(BaseImageProcessor):
    """simple docstring"""
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f'''Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}''')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size must have 'height' and 'width' as keys. Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
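# A usage sketch (my addition; random frames stand in for a real video clip,
# and the class name assumes this is transformers' Vivit video processor):
import numpy as np

demo_frames = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
demo_processor = VivitImageProcessor()
demo_batch = demo_processor(demo_frames, return_tensors="np")
print(demo_batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)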
| 305
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 305
| 1
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 309
|
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
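# A quick example (my addition): greedy change-making with the default
# Indian-currency denominations.
print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987"))
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]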
| 309
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class __A :
def __init__(self : str , __a : int , __a : str=13 , __a : Optional[int]=64 , __a : Optional[int]=2 , __a : Any=3 , __a : int="swish" , __a : Union[str, Any]=3 , __a : List[str]=32 , __a : Tuple=0.1 , __a : str=0.02 , __a : Optional[int]=True , __a : List[Any]=True , __a : Union[str, Any]=10 , __a : int=None , __a : List[Any]=0.25 , __a : Optional[Any]=0.0 , __a : Union[str, Any]=0.0 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = make_divisible(512 * width_multiplier , divisor=8 )
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = conv_kernel_size
UpperCAmelCase_ = output_stride
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = width_multiplier
UpperCAmelCase_ = ffn_dropout
UpperCAmelCase_ = attn_dropout
def _lowercase (self : List[str] ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase (self : Any ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def _lowercase (self : str , __a : Union[str, Any] , __a : Union[str, Any] , __a : Any , __a : Tuple ):
UpperCAmelCase_ = MobileViTVaModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowercase (self : int , __a : Optional[Any] , __a : List[Any] , __a : int , __a : Any ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase (self : Optional[Any] , __a : List[str] , __a : Any , __a : Any , __a : Optional[Any] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : str = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a__ : Any = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a__ : List[Any] = False
a__ : str = False
a__ : Union[str, Any] = False
a__ : int = False
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = MobileViTVaModelTester(self )
UpperCAmelCase_ = MobileViTVaConfigTester(self , config_class=__a , has_text_modality=__a )
def _lowercase (self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def _lowercase (self : str ):
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def _lowercase (self : Optional[Any] ):
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def _lowercase (self : Any ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def _lowercase (self : List[str] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase (self : Union[str, Any] ):
pass
def _lowercase (self : List[str] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : int ):
def check_hidden_states_output(__a : List[Any] , __a : List[Any] , __a : Optional[int] ):
UpperCAmelCase_ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 5
self.assertEqual(len(__a ) , __a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_ = 2
for i in range(len(__a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(__a , __a , __a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def _lowercase (self : Any ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MobileViTVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def _lowercase (self : Union[str, Any] ):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
__a )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**__a )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase_ = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(__a )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**__a )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __a )
UpperCAmelCase_ = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
@slow
def _lowercase (self : List[str] ):
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(__a )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**__a )
UpperCAmelCase_ = outputs.logits.detach().cpu()
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=__a , target_sizes=[(50, 60)] )
UpperCAmelCase_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __a )
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=__a )
UpperCAmelCase_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __a )
| 106
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
    't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
    't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
    't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
    't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )


class T5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
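# A minimal usage sketch (my addition): the defaults above reproduce t5-small's
# architecture hyper-parameters.
demo_config = T5Config()
print(demo_config.d_model, demo_config.num_layers, demo_config.num_decoder_layers)  # 512 6 6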
| 106
| 1
|
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from input"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
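# --- editor's usage sketch (run from an interactive terminal) -----------------
def _demo_read_key() -> None:
    key = get_character()
    if isinstance(key, str) and ord(key) == KEYMAP["up"]:
        print("up arrow")
    else:
        print(f"got: {key!r}")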
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
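# Editor's note: with the _LazyModule swap above, `import transformers.onnx` is
# cheap; the submodules listed in `_import_structure` are only imported the first
# time one of their exported names (e.g. `OnnxConfig` or `export`) is accessed.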
|
def text_justification(word: str, max_width: int) -> list:
    """simple docstring"""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
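# --- editor's usage sketch ----------------------------------------------------
# Example output, hand-checked against the algorithm above (width 16):
def _demo_text_justification() -> None:
    result = text_justification("This is an example of text justification.", 16)
    assert result == ["This    is    an", "example  of text", "justification.  "]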
if __name__ == "__main__":
from doctest import testmod
testmod()
|
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
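# --- editor's cross-check sketch ------------------------------------------------
# For denominators up to 8 the fraction immediately left of 3/7 is 2/5
# (2/5 = 0.4 < 3/7 ~ 0.4286), so the numerator returned should be 2.
def _check_solution_small() -> None:
    assert solution(3, 7, 8) == 2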
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
|
from collections.abc import Callable
class Heap:
    """A generic Heap class, ordering items by a custom key function."""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)
    def _parent(self, i):
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent(self, i):
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index):
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
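# --- editor's usage sketch ------------------------------------------------------
# With the default identity key and `<` comparison bubbling larger scores up,
# the structure behaves as a max-heap on the item values:
def _demo_heap() -> None:
    heap = Heap()
    heap.insert_item(5, 34)
    heap.insert_item(6, 31)
    heap.insert_item(7, 37)
    assert heap.get_top() == [7, 37]      # largest value on top
    heap.update_item(6, 50)
    assert heap.get_top() == [6, 50]      # updated key bubbles up
    assert heap.extract_top() == [6, 50]  # removal keeps the heap valid
    assert heap.get_top() == [7, 37]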
if __name__ == "__main__":
import doctest
doctest.testmod()
|
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCAmelCase_ (self ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCAmelCase_ (self ):
pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
def UpperCAmelCase_ (self ):
# fmt: off
UpperCamelCase__ = {"""input_ids""": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
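# --- editor's sketch of the regex above -----------------------------------------
def _demo_checkpoint_regex() -> None:
    docstring = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert _re_checkpoint.findall(docstring) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]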
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func` starting from the point `a` via the update
    x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
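# --- editor's sanity sketch -------------------------------------------------------
# sqrt(2) via f(x) = x**2 - 2; Newton-Raphson converges quadratically from 1.5.
def _check_newton_raphson() -> None:
    root = newton_raphson("x**2 - 2", 1.5)
    assert abs(root - 2**0.5) < 1e-9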
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (here: the value of pi)
    print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
    # Find root of polynomial
    print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find the value of e (root of log(x) - 1 = 0)
    print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
    # Exponential roots
    print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    """heap helper: index of the parent of the node at `position`"""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """heap helper: index of the left child of the node at `position`"""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """heap helper: index of the right child of the node at `position`"""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by integer weights, with a position map so
    that `update_key` works in O(log n)."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Graph Undirected Weighted Class"""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """simple docstring"""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
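# --- editor's usage sketch ----------------------------------------------------------
def _demo_prims() -> None:
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 1)
    graph.add_edge(2, 3, 2)
    graph.add_edge(1, 3, 10)
    dist, parent = prims_algo(graph)
    # The spanning tree uses edges (1,2) and (2,3); node 3 is reached via node 2
    # rather than the heavy 1-3 edge.
    assert parent[2] == 1 and parent[3] == 2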
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
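# --- editor's note ---------------------------------------------------------------
# The `converters` mapping is forwarded to the underlying pandas CSV reader, so
# each callable turns one raw cell string into a Python value before the Arrow
# table is built. A pure-Python illustration of the converter used above:
def _demo_int_list_converter() -> None:
    convert = lambda x: [int(i) for i in x.split()]
    assert convert("7 8 9") == [7, 8, 9]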
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """simple docstring"""
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
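# --- editor's sketch: the fused-qkv split above -----------------------------------
# A fused projection stacks query/key/value along dim 0, so equal thirds of the
# rows recover the three matrices:
def _demo_qkv_split() -> None:
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), fused)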
def read_in_decoder_q_k_v(state_dict, config):
    """simple docstring"""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, F'Unexpected keys: {unexpected_keys}'

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(F'nielsr/{model_name}')
        image_processor.push_to_hub(F'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def __lowerCamelCase ( wav ,max_length ,sample_rate = 1_60_00 ) -> Dict:
    """simple docstring"""
    _SCREAMING_SNAKE_CASE = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    _SCREAMING_SNAKE_CASE = randint(0 ,len(wav ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
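# A hypothetical usage sketch (values illustrative, not part of this script): a 10 s
# waveform at 16 kHz is cropped to a random 5 s window, while shorter clips pass through:
#   clip = random_subsample(wav, max_length=5, sample_rate=1_60_00)  # len(clip) == 8_00_00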
@dataclass
class __UpperCAmelCase :
__snake_case : Optional[str] = field(default=_UpperCAmelCase ,metadata={"help": "Name of a dataset from the datasets package"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "A file containing the training audio paths and labels."} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "A file containing the validation audio paths and labels."} )
__snake_case : str = field(
default="train" ,metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} ,)
__snake_case : str = field(
default="validation" ,metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} ,)
__snake_case : str = field(
default="audio" ,metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} ,)
__snake_case : str = field(
default="label" ,metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
__snake_case : Optional[int] = field(
default=_UpperCAmelCase ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
__snake_case : Optional[int] = field(
default=_UpperCAmelCase ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
__snake_case : float = field(
default=20 ,metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} ,)
@dataclass
class __UpperCAmelCase :
__snake_case : str = field(
default="facebook/wav2vec2-base" ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ,)
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
__snake_case : str = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Name or path of preprocessor config."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
__snake_case : Optional[bool] = field(
default=_UpperCAmelCase ,metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} ,)
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder` """
                """instead. Setting `freeze_feature_encoder==True`.""" , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" ,snake_case__ ,snake_case__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
_SCREAMING_SNAKE_CASE = DatasetDict()
    raw_datasets["""train"""] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.train_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
    raw_datasets["""eval"""] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.eval_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--label_column_name` to the correct text column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path ,return_attention_mask=model_args.attention_mask ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_SCREAMING_SNAKE_CASE = raw_datasets.cast_column(
data_args.audio_column_name ,datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_SCREAMING_SNAKE_CASE = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
_SCREAMING_SNAKE_CASE = []
for audio in batch[data_args.audio_column_name]:
_SCREAMING_SNAKE_CASE = random_subsample(
audio["""array"""] ,max_length=data_args.max_length_seconds ,sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case__ )
        _SCREAMING_SNAKE_CASE = feature_extractor(subsampled_wavs ,sampling_rate=feature_extractor.sampling_rate )
        _SCREAMING_SNAKE_CASE = {model_input_name: inputs.get(model_input_name )}
_SCREAMING_SNAKE_CASE = list(batch[data_args.label_column_name] )
return output_batch
    def val_transforms(batch ):
_SCREAMING_SNAKE_CASE = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        _SCREAMING_SNAKE_CASE = feature_extractor(wavs ,sampling_rate=feature_extractor.sampling_rate )
        _SCREAMING_SNAKE_CASE = {model_input_name: inputs.get(model_input_name )}
_SCREAMING_SNAKE_CASE = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features[data_args.label_column_name].names
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = {}, {}
for i, label in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE = str(snake_case__ )
_SCREAMING_SNAKE_CASE = label
# Load the accuracy metric from the datasets package
_SCREAMING_SNAKE_CASE = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        _SCREAMING_SNAKE_CASE = np.argmax(eval_pred.predictions ,axis=1 )
        return metric.compute(predictions=predictions ,references=eval_pred.label_ids )
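    # Shapes involved, as a hedged note (illustrative, not from this script):
    # eval_pred.predictions is an array of logits with shape (num_examples, num_labels),
    # eval_pred.label_ids has shape (num_examples,), and the return value is a dict
    # such as {"accuracy": 0.87}.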
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path ,num_labels=len(labels ) ,label2id=label2id ,id2label=id2label ,finetuning_task="""audio-classification""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
_SCREAMING_SNAKE_CASE = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=snake_case__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["""train"""] = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case__ ,output_all_columns=snake_case__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets["""eval"""] = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case__ ,output_all_columns=snake_case__ )
# Initialize our trainer
    _SCREAMING_SNAKE_CASE = Trainer(
        model=model ,args=training_args ,train_dataset=raw_datasets["""train"""] if training_args.do_train else None ,eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None ,compute_metrics=compute_metrics ,tokenizer=feature_extractor ,)
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE = last_checkpoint
_SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics("""train""" ,train_result.metrics )
trainer.save_metrics("""train""" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics("""eval""" ,snake_case__ )
trainer.save_metrics("""eval""" ,snake_case__ )
# Write model card and (optionally) push to hub
_SCREAMING_SNAKE_CASE = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
if __name__ == "__main__":
main()
| 125
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE ( a_ , a_ ):
"""simple docstring"""
lowercase__ = "swin"
lowercase__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self ,image_size=2_2_4 ,patch_size=4 ,num_channels=3 ,embed_dim=9_6 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 1_2, 2_4] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,initializer_range=0.02 ,layer_norm_eps=1E-5 ,encoder_stride=3_2 ,out_features=None ,out_indices=None ,**kwargs ,):
        super().__init__(**kwargs )
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : List[str] = patch_size
lowerCAmelCase__ : Optional[Any] = num_channels
lowerCAmelCase__ : str = embed_dim
lowerCAmelCase__ : Optional[int] = depths
        lowerCAmelCase__ : Tuple = len(depths )
lowerCAmelCase__ : List[Any] = num_heads
lowerCAmelCase__ : str = window_size
lowerCAmelCase__ : str = mlp_ratio
lowerCAmelCase__ : Optional[Any] = qkv_bias
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : int = use_absolute_embeddings
lowerCAmelCase__ : Tuple = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
        lowerCAmelCase__ : Any = int(embed_dim * 2 ** (len(depths ) - 1) )
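        # e.g. with the defaults above: embed_dim=96 over 4 stages gives 96 * 2**3 = 768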
        lowerCAmelCase__ : List[str] = ['''stem'''] + [F'stage{idx}' for idx in range(1 ,len(depths ) + 1 )]
        lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = get_aligned_output_features_output_indices(
            out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = version.parse("1.11" )
@property
def __lowerCAmelCase ( self : Optional[Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self : str ):
return 1E-4
| 106
|
"""simple docstring"""
import os
from collections.abc import Iterator
def __SCREAMING_SNAKE_CASE ( A_ = "." ):
for dir_path, dir_names, filenames in os.walk(A_ ):
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('''./''' )
def __SCREAMING_SNAKE_CASE ( i ):
    return f'{i * " "}*' if i else "\n##"
def __SCREAMING_SNAKE_CASE ( old_path , new_path ):
    lowerCAmelCase__ : Optional[Any] = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
return new_path
def __SCREAMING_SNAKE_CASE ( A_ = "." ):
lowerCAmelCase__ : Any = ''''''
for filepath in sorted(good_file_paths(A_ ) ):
        lowerCAmelCase__ ,lowerCAmelCase__ : str = os.path.split(filepath )
if filepath != old_path:
            lowerCAmelCase__ : str = print_path(old_path , filepath )
lowerCAmelCase__ : str = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase__ : Union[str, Any] = f'{filepath}/{filename}'.replace(''' ''' , '''%20''' )
lowerCAmelCase__ : List[str] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
        print(f'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 106
| 1
|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = (DDPMScheduler,)
def lowercase_ ( self : Optional[Any] , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_A )
return config
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def lowercase_ ( self : str ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.scheduler_classes[0]
UpperCAmelCase__ : List[Any] = self.get_scheduler_config()
UpperCAmelCase__ : int = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
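        # Hedged note: these reference values are consistent with the DDPM posterior
        # variance beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t used
        # for variance_type="fixed_small": ~0 at t=0, rising towards beta_end=0.02 at t=999.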
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.scheduler_classes[0]
UpperCAmelCase__ : Optional[int] = self.get_scheduler_config()
UpperCAmelCase__ : Union[str, Any] = scheduler_class(**_A )
UpperCAmelCase__ : List[Any] = len(_A )
UpperCAmelCase__ : Union[str, Any] = self.dummy_model()
UpperCAmelCase__ : List[Any] = self.dummy_sample_deter
UpperCAmelCase__ : int = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
UpperCAmelCase__ : str = model(_A , _A )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase__ : List[Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase__ : Optional[Any] = pred_prev_sample
UpperCAmelCase__ : Dict = torch.sum(torch.abs(_A ) )
UpperCAmelCase__ : str = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : Dict = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase__ : Tuple = scheduler_class(**_A )
UpperCAmelCase__ : List[Any] = len(_A )
UpperCAmelCase__ : int = self.dummy_model()
UpperCAmelCase__ : Union[str, Any] = self.dummy_sample_deter
UpperCAmelCase__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
UpperCAmelCase__ : Dict = model(_A , _A )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase__ : str = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase__ : List[str] = pred_prev_sample
UpperCAmelCase__ : Tuple = torch.sum(torch.abs(_A ) )
UpperCAmelCase__ : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : int = self.get_scheduler_config()
UpperCAmelCase__ : Union[str, Any] = scheduler_class(**_A )
UpperCAmelCase__ : Optional[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
UpperCAmelCase__ : Tuple = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
UpperCAmelCase__ : Union[str, Any] = -1
else:
UpperCAmelCase__ : Optional[Any] = timesteps[i + 1]
UpperCAmelCase__ : List[str] = scheduler.previous_timestep(_A )
UpperCAmelCase__ : Optional[Any] = prev_t.item()
self.assertEqual(_A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = self.scheduler_classes[0]
UpperCAmelCase__ : str = self.get_scheduler_config()
UpperCAmelCase__ : Union[str, Any] = scheduler_class(**_A )
UpperCAmelCase__ : int = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.scheduler_classes[0]
UpperCAmelCase__ : List[Any] = self.get_scheduler_config()
UpperCAmelCase__ : Tuple = scheduler_class(**_A )
UpperCAmelCase__ : List[str] = [100, 87, 50, 1, 0]
UpperCAmelCase__ : Union[str, Any] = len(_A )
with self.assertRaises(_A , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = self.scheduler_classes[0]
UpperCAmelCase__ : str = self.get_scheduler_config()
UpperCAmelCase__ : str = scheduler_class(**_A )
UpperCAmelCase__ : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _A , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_A )
| 299
|
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a__ ( z ) -> List[Any]:
return 1 / (1 + np.exp(-z ))
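# e.g. sigmoid_function(0.0) == 0.5, while large positive/negative z saturates towards 1/0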
def a__ ( h , y ) -> Tuple:
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def a__ ( x , y , weights ) -> Dict:
    UpperCAmelCase__ : str = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def a__ ( alpha , x , y , max_iterations=7_00_00 ) -> List[Any]:
UpperCAmelCase__ : Tuple = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        UpperCAmelCase__ : List[Any] = np.dot(x , theta )
        UpperCAmelCase__ : List[str] = sigmoid_function(z )
        UpperCAmelCase__ : int = np.dot(x.T , h - y ) / y.size
        UpperCAmelCase__ : Optional[int] = theta - alpha * gradient  # updating the weights
        UpperCAmelCase__ : Dict = np.dot(x , theta )
        UpperCAmelCase__ : int = sigmoid_function(z )
        UpperCAmelCase__ : Tuple = cost_function(h , y )
if iterations % 1_00 == 0:
print(F"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
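# A hedged recap of the update above: with m = y.size training examples, each iteration
# recomputes h = sigmoid(x @ theta), forms the log-loss gradient x.T @ (h - y) / m, and
# steps theta <- theta - alpha * gradient (plain batch gradient descent).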
# In[68]:
if __name__ == "__main__":
UpperCamelCase__ = datasets.load_iris()
UpperCamelCase__ = iris.data[:, :2]
UpperCamelCase__ = (iris.target != 0) * 1
UpperCamelCase__ = 0.1
UpperCamelCase__ = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def a__ ( x ) -> Dict:
    return sigmoid_function(
        np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(1_0, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((UpperCamelCase__) , (UpperCamelCase__)) = (x[:, 0].min(), x[:, 0].max())
((UpperCamelCase__) , (UpperCamelCase__)) = (x[:, 1].min(), x[:, 1].max())
((UpperCamelCase__) , (UpperCamelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
UpperCamelCase__ = np.c_[xxa.ravel(), xxa.ravel()]
UpperCamelCase__ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 299
| 1
|
import random
from .binary_exp_mod import bin_exp_mod
def lowerCamelCase_ ( n : int , prec : int = 1000 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
UpperCAmelCase_ : str = n - 1
UpperCAmelCase_ : int = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
UpperCAmelCase_ : str = 0
while count < prec:
UpperCAmelCase_ : Optional[int] = random.randint(2 , n - 1 )
        UpperCAmelCase_ : List[str] = bin_exp_mod(a , d , n )
if b != 1:
UpperCAmelCase_ : str = True
            for _ in range(exp ):
if b == n - 1:
UpperCAmelCase_ : str = False
break
UpperCAmelCase_ : Any = b * b
b %= n
if flag:
return False
count += 1
return True
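# Hedged worked example: for n = 221 (= 13 * 17), n - 1 = 220 = 55 * 2**2, so d = 55 and
# exp = 2. Each round picks a random base a, computes b = a**d mod n, and squares b up to
# exp times looking for n - 1; if it never appears, n is declared composite. Each of the
# `prec` rounds cuts the false-positive probability by at least a factor of 4.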
if __name__ == "__main__":
UpperCamelCase_ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 345
|
'''simple docstring'''
from statistics import mean, stdev
def UpperCamelCase_( data : list , ndigits : int = 3 ):
    '''simple docstring'''
    snake_case_ = min(data )
    snake_case_ = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
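# e.g. min-max normalizing [2.0, 4.0, 6.0] yields [0.0, 0.5, 1.0]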
def UpperCamelCase_( data : list , ndigits : int = 3 ):
    '''simple docstring'''
    snake_case_ = mean(data )
    snake_case_ = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
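# e.g. standardizing [2.0, 4.0, 6.0] (mean 4.0, sample stdev 2.0) yields [-1.0, 0.0, 1.0]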
| 85
| 0
|
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
UpperCamelCase__ : Optional[Any] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCAmelCase ( shape , vocab_size , rng=None ) -> int:
"""simple docstring"""
if rng is None:
A_ : Any = random.Random()
A_ : Optional[int] = 1
for dim in shape:
total_dims *= dim
A_ : Dict = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    A_ : Dict = np.array(values , dtype=jnp.int32 ).reshape(shape )
return output
def UpperCAmelCase ( shape , rng=None ) -> Any:
    """simple docstring"""
    A_ : Tuple = ids_tensor(shape , vocab_size=2 , rng=rng )
# make sure that at least one token is attended to for each batch
A_ : Tuple = 1
return attn_mask
@require_flax
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = None
lowerCamelCase = ()
def UpperCAmelCase_ ( self ) -> str:
A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
A_ : Tuple = 2
A_ : Tuple = inputs["""input_ids"""].shape[-1] // 2
A_ : Optional[Any] = inputs["""input_ids"""][:max_batch_size, :sequence_length]
A_ : Tuple = jnp.ones_like(__UpperCAmelCase )
A_ : Any = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
A_ : Any = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
A_ : Optional[int] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Optional[Any] = self._get_input_ids_and_config()
A_ : List[str] = False
A_ : Optional[Any] = max_length
A_ : int = 0
for model_class in self.all_generative_model_classes:
A_ : int = model_class(__UpperCAmelCase )
A_ : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ : Any = getattr(__UpperCAmelCase , __UpperCAmelCase )
A_ : str = pt_model_class(__UpperCAmelCase ).eval()
A_ : Union[str, Any] = load_flax_weights_in_pytorch_model(__UpperCAmelCase , flax_model.params )
A_ : str = flax_model.generate(__UpperCAmelCase ).sequences
A_ : int = pt_model.generate(torch.tensor(__UpperCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
A_ : int = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCAmelCase_ ( self ) -> int:
A_ : Union[str, Any] = self._get_input_ids_and_config()
A_ : str = False
A_ : Dict = max_length
for model_class in self.all_generative_model_classes:
A_ : List[Any] = model_class(__UpperCAmelCase )
A_ : Union[str, Any] = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
A_ : Optional[Any] = jit(model.generate )
A_ : Dict = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Dict = self._get_input_ids_and_config()
A_ : int = True
A_ : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
A_ : List[str] = model_class(__UpperCAmelCase )
A_ : Optional[int] = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
A_ : Optional[int] = jit(model.generate )
A_ : Any = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Union[str, Any] = self._get_input_ids_and_config()
A_ : Union[str, Any] = False
A_ : Tuple = max_length
A_ : Dict = 2
for model_class in self.all_generative_model_classes:
A_ : List[Any] = model_class(__UpperCAmelCase )
A_ : Dict = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
A_ : Optional[Any] = jit(model.generate )
A_ : int = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ ( self ) -> str:
A_ : str = self._get_input_ids_and_config()
A_ : Optional[int] = False
A_ : Union[str, Any] = max_length
A_ : str = 2
A_ : Dict = 2
for model_class in self.all_generative_model_classes:
A_ : List[Any] = model_class(__UpperCAmelCase )
A_ : Optional[Any] = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Union[str, Any] = self._get_input_ids_and_config()
A_ : Optional[Any] = True
A_ : Tuple = max_length
A_ : str = 0.8
A_ : str = 10
A_ : List[str] = 0.3
A_ : Optional[Any] = 1
A_ : Union[str, Any] = 8
A_ : Optional[Any] = 9
for model_class in self.all_generative_model_classes:
A_ : Union[str, Any] = model_class(__UpperCAmelCase )
A_ : List[Any] = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
A_ : List[str] = jit(model.generate )
A_ : Union[str, Any] = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ ( self ) -> int:
A_ : str = self._get_input_ids_and_config()
A_ : List[Any] = max_length
A_ : List[str] = 1
A_ : Optional[int] = 8
A_ : List[Any] = 9
for model_class in self.all_generative_model_classes:
A_ : int = model_class(__UpperCAmelCase )
A_ : Dict = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
A_ : List[Any] = jit(model.generate )
A_ : List[str] = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Tuple = self._get_input_ids_and_config()
A_ : Tuple = max_length
A_ : List[Any] = 2
A_ : str = 1
A_ : Tuple = 8
A_ : int = 9
for model_class in self.all_generative_model_classes:
A_ : str = model_class(__UpperCAmelCase )
A_ : Union[str, Any] = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
A_ : List[Any] = jit(model.generate )
A_ : Optional[int] = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
A_ : Tuple = attention_mask.at[(0, 0)].set(0 )
A_ : Union[str, Any] = False
A_ : List[str] = max_length
for model_class in self.all_generative_model_classes:
A_ : Any = model_class(__UpperCAmelCase )
A_ : str = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
A_ : Dict = jit(model.generate )
A_ : int = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ ( self ) -> int:
A_ : int = self._get_input_ids_and_config()
# pad attention mask on the left
A_ : Union[str, Any] = attention_mask.at[(0, 0)].set(0 )
A_ : Optional[int] = True
A_ : List[str] = max_length
for model_class in self.all_generative_model_classes:
A_ : List[str] = model_class(__UpperCAmelCase )
A_ : Any = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
A_ : int = jit(model.generate )
A_ : Union[str, Any] = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : Any = self._get_input_ids_and_config()
# pad attention mask on the left
A_ : str = attention_mask.at[(0, 0)].set(0 )
A_ : Optional[int] = 2
A_ : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
A_ : Any = model_class(__UpperCAmelCase )
A_ : List[Any] = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
A_ : Optional[Any] = jit(model.generate )
A_ : Dict = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
A_ : Optional[Any] = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A_ : Optional[Any] = """Hello world"""
A_ : Optional[Any] = tokenizer(__UpperCAmelCase , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__UpperCAmelCase , """do_samples""" ):
model.generate(__UpperCAmelCase , do_samples=__UpperCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__UpperCAmelCase , """foo""" ):
A_ : Optional[Any] = {"""foo""": """bar"""}
model.generate(__UpperCAmelCase , **__UpperCAmelCase )
| 366
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ : Any = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Union[str, Any] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 164
| 0
|
from __future__ import annotations
import numpy as np
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
    return np.maximum(0 , __lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 189
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Tuple ={
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] =[
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowerCamelCase : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 189
| 1
|
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = encoder_stride
_lowerCAmelCase = num_attention_outputs
_lowerCAmelCase = embed_dim
_lowerCAmelCase = embed_dim + 1
_lowerCAmelCase = resolution
_lowerCAmelCase = depths
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = dim
_lowerCAmelCase = mlp_expansion_ratio
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFEfficientFormerModel(config=_lowercase )
_lowerCAmelCase = model(_lowercase , training=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.type_sequence_label_size
_lowerCAmelCase = TFEfficientFormerForImageClassification(_lowercase )
_lowerCAmelCase = model(_lowercase , labels=_lowercase , training=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = TFEfficientFormerForImageClassification(_lowercase )
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowercase : List[str] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowercase : str = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowercase : Dict = False
_lowercase : Optional[int] = False
_lowercase : str = False
_lowercase : str = False
_lowercase : int = False
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFEfficientFormerModelTester(self )
_lowerCAmelCase = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
_lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
_lowerCAmelCase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
_lowerCAmelCase = seq_length * self.model_tester.chunk_length
else:
_lowerCAmelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_lowerCAmelCase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
_lowerCAmelCase = getattr(self.model_tester , """seq_length""" , _lowercase )
_lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def _lowercase ( self , _lowercase , _lowercase , _lowercase=False ):
"""simple docstring"""
_lowerCAmelCase = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
_lowerCAmelCase = getattr(self.model_tester , """seq_length""" , _lowercase )
_lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
_lowerCAmelCase = getattr(self.model_tester , """key_length""" , _lowercase )
_lowerCAmelCase = getattr(self.model_tester , """chunk_length""" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
_lowerCAmelCase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
_lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
_lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_lowerCAmelCase = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_lowerCAmelCase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_lowerCAmelCase = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def A ():
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
_lowerCAmelCase = model(**_lowercase , training=_lowercase )
# verify the logits
_lowerCAmelCase = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowercase )
_lowerCAmelCase = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
_lowerCAmelCase = model(**_lowercase , training=_lowercase )
# verify the logits
_lowerCAmelCase = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowercase )
_lowerCAmelCase = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 229
|
'''simple docstring'''
import logging
from transformers import PretrainedConfig
_lowercase = logging.getLogger(__name__)
_lowercase = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Optional[Any] = '''bertabs'''
def __init__( self , _lowercase=30_522 , _lowercase=512 , _lowercase=6 , _lowercase=512 , _lowercase=8 , _lowercase=512 , _lowercase=0.2 , _lowercase=6 , _lowercase=768 , _lowercase=8 , _lowercase=2_048 , _lowercase=0.2 , **_lowercase , ):
"""simple docstring"""
super().__init__(**_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_pos
_lowerCAmelCase = enc_layers
_lowerCAmelCase = enc_hidden_size
_lowerCAmelCase = enc_heads
_lowerCAmelCase = enc_ff_size
_lowerCAmelCase = enc_dropout
_lowerCAmelCase = dec_layers
_lowerCAmelCase = dec_hidden_size
_lowerCAmelCase = dec_heads
_lowerCAmelCase = dec_ff_size
_lowerCAmelCase = dec_dropout
| 229
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case_ : Optional[int] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
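
# Note (illustrative): with the _LazyModule installed in sys.modules, the heavy
# torch/TF submodules listed above are only imported on first attribute access,
# so e.g. `from transformers.models.swin import SwinConfig` stays cheap until a
# torch-backed class such as SwinModel is actually used.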
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f"{solution() = }")
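    # Sanity check against the well-known Project Euler #2 answer for 4,000,000.
    assert solution() == 4_613_732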
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Perceiver model."""

    model_type = "perceiver"
    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    """ONNX export configuration for Perceiver."""
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
def to_upper(word: str) -> str:
    """Convert the ASCII lowercase letters in ``word`` to uppercase."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
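    # Example: each ASCII lowercase letter is shifted by -32 into uppercase.
    print(to_upper("Hello World"))  # HELLO WORLD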
import functools
def minimum_cost(days: list, costs: list) -> int:
    """Return the minimum cost of 1-, 7- and 30-day passes covering all travel days."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
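    # Illustrative check (classic minimum-ticket-cost instance): with 1-, 7- and
    # 30-day passes costing 2, 7 and 15, the days below can be covered for 11.
    print(minimum_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11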
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
import math
def proth(number: int) -> int:
    """Return the ``number``-th Proth number (k * 2**n + 1, with odd k < 2**n)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # The number of Proth numbers generated per "block" doubles each time.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
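    # First Proth numbers for reference (OEIS A080075): 3, 5, 9, 13, 17, 25, 33, 41, 49, 57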
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
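
# Illustrative invocation (flags mirror the dataclass fields defined above):
#   python run_language_modeling.py --model_name_or_path bert-base-uncased \
#       --train_data_file train.txt --do_train --mlm --output_dir ./out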
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base(self):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
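    # Hedged sketch (not part of the test file): what {"type": "linear", "factor": 10.0}
    # does to rotary position embeddings. Positions are divided by the factor before the
    # RoPE angles are computed, so the model can address factor-times longer sequences.
    # The dim/base defaults below are illustrative assumptions.
    def _linear_scaled_rope_angles(self, position: float, dim: int = 8, base: float = 10000.0, factor: float = 10.0):
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))  # standard RoPE inverse frequencies
        return (position / factor) * inv_freq  # linear scaling compresses positions by `factor`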
| 274
| 1
|
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
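# Hedged note (not part of the test suite): in this assumed vocab layout, ids 0-3 are
# reserved for special tokens, so the first monolingual entry "▁This" lands at index 4,
# which is why the expected id list above starts at 4 and maps unknowns to 3 (<unk>).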
| 229
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    '''simple docstring'''

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
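# Hedged usage sketch: this builder backs the packaged "pandas" loader, which reads
# pickled DataFrames. The file name below is an illustrative assumption.
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files="frames.pkl", split="train")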
| 229
| 1
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    """simple docstring"""

    def _create_dummy_dataset(self) -> Dataset:
        '''simple docstring'''
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        '''simple docstring'''
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        '''simple docstring'''
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        '''simple docstring'''
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        '''simple docstring'''
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        '''simple docstring'''
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    """simple docstring"""

    def test_flat_ip(self):
        '''simple docstring'''
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        '''simple docstring'''
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        '''simple docstring'''
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        '''simple docstring'''
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    '''simple docstring'''
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    """simple docstring"""

    def test_elasticsearch(self):
        '''simple docstring'''
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
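# Hedged sketch (numpy only, no faiss dependency; added for clarity): the inner-product
# retrieval the FAISS tests above exercise. With one-hot corpus vectors, the best match
# for a one-hot query is its hot dimension, which is why the assertions expect index 1
# and the reversed order [4, 3, 2, 1, 0] for reversed-identity queries.
def _inner_product_search_sketch():
    corpus = np.eye(5, dtype=np.float32)  # five one-hot "embeddings"
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores = corpus @ query  # inner-product score against every corpus row
    return int(np.argmax(scores))  # -> 1, matching the single-query assertions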
| 361
|
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of roman numerals to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generates the minimal (subtractive-form) roman numeral for an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
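# Worked example (added for illustration): parse_roman_numerals("XIIII") == 14 and
# generate_roman_numerals(14) == "XIV", so rewriting that numeral saves two characters.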
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Counts the characters saved by rewriting each numeral in the file minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174
| 0
|
def solution(n: int = 4000000) -> int:
    """Returns the sum of all even Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
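# Worked example (added for illustration): the even Fibonacci terms not exceeding 100
# are 2, 8 and 34, so solution(100) == 44.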
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337
| 1
|
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
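# Worked example (added for illustration): for a denominator d, the best numerator is
# floor(d * 3 / 7), stepped back by one when d is a multiple of 7 so the fraction stays
# strictly left of 3/7. With limit=8 the winner is 2/5, so solution(3, 7, 8) == 2.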
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
| 366
|
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 26
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
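# Hedged usage sketch (not part of this module): exporting a CamemBERT checkpoint with
# the OnnxConfig above typically goes through the transformers.onnx CLI; the checkpoint
# name, feature, and output directory below are illustrative assumptions.
#
#   python -m transformers.onnx --model=camembert-base --feature=sequence-classification onnx/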
| 293
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
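# Note (added for clarity): build_mask(sequence, pad_token_id) marks real tokens with 1
# and padding with 0, while compute_token_type_ids alternates the segment id at each
# separator token -- that is exactly what the expected tensors above encode.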
| 293
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]  # number of rows
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
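    # Hedged follow-up sketch (not in the original script): predictions come out in the
    # scaled [0, 1] space, so a real evaluation would inverse-transform them first.
    # `scaler` is an assumption -- the script above discards the fitted MinMaxScaler.
    #
    #   scaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(len_data, 1))
    #   pred_prices = scaler.inverse_transform(pred.reshape(-1, 1))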
| 351
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
@slow
    def test_flax_xlm_roberta_base(self):
        '''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 330
| 0
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
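# Note (added for clarity): z is the number of element comparisons performed; for a
# randomized quicksort on p = 100 elements the expectation is about 2 * p * ln(p),
# i.e. roughly 900 comparisons.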
| 274
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
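# Note (added for clarity): odd-even transposition sort runs n compare-exchange rounds
# for n elements; with one process per element each round is constant-time, giving
# O(n) parallel time at the cost of O(n^2) total work.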
| 274
| 1
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image Classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
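# Hedged usage sketch: the auto classes above dispatch on a checkpoint's config type.
# The checkpoint name is an illustrative assumption.
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")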
| 363
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
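# Hedged invocation sketch (script name and paths are illustrative assumptions):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt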
| 216
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 52
|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 174
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a__ : List[Any] = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
__SCREAMING_SNAKE_CASE = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
__SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
__SCREAMING_SNAKE_CASE = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
__UpperCamelCase = import_module("""tasks""" )
try:
__UpperCamelCase = getattr(__A ,model_args.task_type )
__UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" ,__A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__UpperCamelCase = token_classification_task.get_labels(data_args.labels )
__UpperCamelCase = dict(enumerate(__A ) )
__UpperCamelCase = len(__A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=__A ,id2label=__A ,label2id={label: i for i, label in enumerate(__A )} ,cache_dir=model_args.cache_dir ,)
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast ,)
__UpperCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=__A ,cache_dir=model_args.cache_dir ,)
# Get datasets
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=__A ,data_dir=data_args.data_dir ,tokenizer=__A ,labels=__A ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=__A ,data_dir=data_args.data_dir ,tokenizer=__A ,labels=__A ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
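    # Convert raw logits back to label strings: argmax over the label axis, then
    # drop positions whose gold label is nn.CrossEntropyLoss().ignore_index
    # (special tokens and padded sub-word pieces), so seqeval only sees real tokens.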
def align_predictions(__A ,__A ) -> Tuple[List[int], List[int]]:
__UpperCamelCase = np.argmax(__A ,axis=2 )
__UpperCamelCase , __UpperCamelCase = preds.shape
__UpperCamelCase = [[] for _ in range(__A )]
__UpperCamelCase = [[] for _ in range(__A )]
for i in range(__A ):
for j in range(__A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
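    # seqeval scores at the entity level, so metrics are computed on the
    # realigned label-string sequences rather than on raw token ids.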
def compute_metrics(__A ) -> Dict:
__UpperCamelCase , __UpperCamelCase = align_predictions(p.predictions ,p.label_ids )
return {
"accuracy_score": accuracy_score(__A ,__A ),
"precision": precision_score(__A ,__A ),
"recall": recall_score(__A ,__A ),
"f1": fa_score(__A ,__A ),
}
# Data collator
    __UpperCamelCase = DataCollatorWithPadding(__A ,pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=__A ,args=__A ,train_dataset=__A ,eval_dataset=__A ,compute_metrics=__A ,data_collator=__A ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir ,"""eval_results.txt""" )
if trainer.is_world_process_zero():
with open(__A ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" ,__A ,__A )
writer.write("""%s = %s\n""" % (key, value) )
results.update(__A )
# Predict
if training_args.do_predict:
__UpperCamelCase = TokenClassificationDataset(
token_classification_task=__A ,data_dir=data_args.data_dir ,tokenizer=__A ,labels=__A ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.test ,)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = trainer.predict(__A )
__UpperCamelCase , __UpperCamelCase = align_predictions(__A ,__A )
__UpperCamelCase = os.path.join(training_args.output_dir ,"""test_results.txt""" )
if trainer.is_world_process_zero():
with open(__A ,"""w""" ) as writer:
for key, value in metrics.items():
logger.info(""" %s = %s""" ,__A ,__A )
writer.write("""%s = %s\n""" % (key, value) )
# Save predictions
__UpperCamelCase = os.path.join(training_args.output_dir ,"""test_predictions.txt""" )
if trainer.is_world_process_zero():
with open(__A ,"""w""" ) as writer:
with open(os.path.join(data_args.data_dir ,"""test.txt""" ) ,"""r""" ) as f:
token_classification_task.write_predictions_to_file(__A ,__A ,__A )
return results
def _lowercase ( __A ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 243
|
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a__ : Optional[Any] = logging.getLogger(__name__)
class UpperCAmelCase__ :
def __init__( self ) -> Union[str, Any]:
__UpperCamelCase = False
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
if not self.initialized:
__UpperCamelCase = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = True
def __lowerCamelCase ( self ) -> List[Any]:
self.retriever.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> Optional[Any]:
if index is not None and index.is_initialized() and len(lowercase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase )
for worker in self.retrieval_workers
] )
def __lowerCamelCase ( self ) -> Optional[int]:
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
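    # Route each query to one randomly chosen Ray retrieval actor when workers
    # are available; otherwise fall back to in-process retrieval.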
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) )
else:
__UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Tuple:
return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> Dict:
__UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase )
__UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase )
__UpperCamelCase = rag_tokenizer.question_encoder
__UpperCamelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase = """custom"""
__UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase )
else:
__UpperCamelCase = cls._build_index(lowercase )
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
| 243
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_lowerCAmelCase = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"""do_convert_rgb""": True,
}
        _lowerCAmelCase = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_snake_case , _snake_case )
def snake_case ( self , **_snake_case ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def snake_case ( self , **_snake_case ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def snake_case ( self , **_snake_case ):
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_snake_case )
def snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
"""simple docstring"""
        _lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        _lowerCAmelCase = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case )
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _snake_case )
self.assertIsInstance(processor_fast.tokenizer , _snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _snake_case )
self.assertIsInstance(processor_fast.image_processor , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
_lowerCAmelCase = self.get_image_processor(do_normalize=_snake_case )
_lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=_snake_case )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(_snake_case , return_tensors="""np""" )
_lowerCAmelCase = processor(images=_snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
_lowerCAmelCase = """Alexandra,T-shirt的价格是15便士。"""
_lowerCAmelCase = processor(text=_snake_case )
_lowerCAmelCase = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
_lowerCAmelCase = """Alexandra,T-shirt的价格是15便士。"""
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
_lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase = processor.batch_decode(_snake_case )
_lowerCAmelCase = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
_lowerCAmelCase = """Alexandra,T-shirt的价格是15便士。"""
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 82
|
from __future__ import annotations
import numpy as np
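# ReLU is the element-wise map x -> max(0, x); np.maximum broadcasts the
# scalar 0 across the whole input array.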
def lowerCAmelCase_ ( snake_case_ ):
return np.maximum(0,snake_case_ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 26
| 0
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
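# Pairwise cosine similarity: L2-normalize both embedding matrices, then a
# single matrix multiply yields the full (num_images x num_concepts) grid.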
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = nn.functional.normalize(lowerCamelCase)
__lowerCAmelCase = nn.functional.normalize(lowerCamelCase)
return torch.mm(lowerCamelCase, normalized_text_embeds.t())
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : str = CLIPConfig
__UpperCamelCase : Optional[Any] = ['CLIPEncoderLayer']
def __init__(self , __lowercase ):
super().__init__(__lowercase )
__lowerCAmelCase = CLIPVisionModel(config.vision_config )
__lowerCAmelCase = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__lowercase )
__lowerCAmelCase = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__lowercase )
__lowerCAmelCase = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__lowercase )
__lowerCAmelCase = nn.Parameter(torch.ones(17 ) , requires_grad=__lowercase )
__lowerCAmelCase = nn.Parameter(torch.ones(3 ) , requires_grad=__lowercase )
@torch.no_grad()
def _snake_case (self , __lowercase , __lowercase ):
__lowerCAmelCase = self.vision_model(__lowercase )[1] # pooled_output
__lowerCAmelCase = self.visual_projection(__lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowerCAmelCase = cosine_distance(__lowercase , self.special_care_embeds ).cpu().float().numpy()
__lowerCAmelCase = cosine_distance(__lowercase , self.concept_embeds ).cpu().float().numpy()
__lowerCAmelCase = []
__lowerCAmelCase = image_embeds.shape[0]
for i in range(__lowercase ):
__lowerCAmelCase = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__lowerCAmelCase = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
__lowerCAmelCase = special_cos_dist[i][concept_idx]
__lowerCAmelCase = self.special_care_embeds_weights[concept_idx].item()
__lowerCAmelCase = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
__lowerCAmelCase = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
__lowerCAmelCase = cos_dist[i][concept_idx]
__lowerCAmelCase = self.concept_embeds_weights[concept_idx].item()
__lowerCAmelCase = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__lowercase )
result.append(__lowercase )
__lowerCAmelCase = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
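    # Vectorized variant of the check above: the same special-care / concept
    # thresholding expressed purely in tensor ops (no Python loop over images),
    # which keeps the module traceable.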
@torch.no_grad()
def _snake_case (self , __lowercase , __lowercase ):
__lowerCAmelCase = self.vision_model(__lowercase )[1] # pooled_output
__lowerCAmelCase = self.visual_projection(__lowercase )
__lowerCAmelCase = cosine_distance(__lowercase , self.special_care_embeds )
__lowerCAmelCase = cosine_distance(__lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__lowerCAmelCase = 0.0
__lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
__lowerCAmelCase = torch.any(special_scores > 0 , dim=1 )
__lowerCAmelCase = special_care * 0.0_1
__lowerCAmelCase = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
__lowerCAmelCase = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
__lowerCAmelCase = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 363
|
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __magic_name__( ):
__lowerCAmelCase = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0)]
__lowerCAmelCase = randint(-5_0_0_0, 5_0_0_0)
return (arr, r)
_UpperCAmelCase : Dict = make_dataset()
def __magic_name__( lowerCamelCase, lowerCamelCase):
for triplet in permutations(lowerCamelCase, 3):
if sum(lowerCamelCase) == target:
return tuple(sorted(lowerCamelCase))
return (0, 0, 0)
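# Optimized variant: sort once, then for each pivot element walk two pointers
# inward from both ends, O(n^2) overall versus the O(n^3) permutation scan above.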
def __magic_name__( lowerCamelCase, lowerCamelCase):
arr.sort()
__lowerCAmelCase = len(lowerCamelCase)
for i in range(n - 1):
__lowerCAmelCase , __lowerCAmelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __magic_name__( ):
__lowerCAmelCase = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
__lowerCAmelCase = '''
triplet_sum1(*dataset)
'''
__lowerCAmelCase = '''
triplet_sum2(*dataset)
'''
__lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0)
__lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0)
return (min(lowerCamelCase), min(lowerCamelCase))
if __name__ == "__main__":
from doctest import testmod
testmod()
_UpperCAmelCase : Union[str, Any] = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 9
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class A__ :
lowerCAmelCase__ : str = BlenderbotSmallConfig
lowerCAmelCase__ : int = {}
lowerCAmelCase__ : str = "gelu"
def __init__( self : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Any=False , _UpperCAmelCase : Any=99 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Optional[Any]=37 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : List[Any]=20 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : Tuple=0 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowercase = prepare_blenderbot_small_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def a__ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFBlenderbotSmallModel(config=__UpperCAmelCase ).get_decoder()
__lowercase = inputs_dict['input_ids']
__lowercase = input_ids[:1, :]
__lowercase = inputs_dict['attention_mask'][:1, :]
__lowercase = inputs_dict['head_mask']
__lowercase = 1
# first forward pass
__lowercase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
__lowercase , __lowercase = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and attention mask
__lowercase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowercase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
__lowercase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowercase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowercase = output_from_no_past[:, -3:, random_slice_idx]
__lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1e-3 )
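# Build default masks when the caller doesn't supply them: attention masks are 1
# wherever the token differs from pad_token_id, and head masks default to
# all-ones (no heads pruned).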
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : str=None , ) -> Dict:
if attention_mask is None:
        __lowercase = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
__lowercase = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
__lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Optional[int] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
lowerCAmelCase__ : int = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase__ : Any = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : Tuple = False
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = TFBlenderbotSmallModelTester(self )
__lowercase = ConfigTester(self , config_class=__UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
@require_tokenizers
@require_tf
class A__ ( unittest.TestCase ):
lowerCAmelCase__ : Dict = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
lowerCAmelCase__ : List[Any] = "facebook/blenderbot_small-90M"
@cached_property
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def a__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
        __lowercase = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
@slow
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.tokenizer(self.src_text , return_tensors='tf' )
__lowercase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , )
__lowercase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 325
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
a_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ):
for attribute in key.split('''.''' ):
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase )
if weight_type is not None:
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ):
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,)
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2]
__lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase )
if "weight_g" in name:
__lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase = '''weight_v'''
elif "bias" in name:
__lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = '''weight'''
else:
__lowerCamelCase = None
set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
__lowerCamelCase = name.split('''.''' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ):
if config_path is not None:
__lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCamelCase = UniSpeechSatConfig()
__lowerCamelCase = ''''''
if is_finetuned:
__lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase )
else:
__lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__lowerCamelCase = model[0].eval()
recursively_load_weights(_UpperCamelCase ,_UpperCamelCase )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
a_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 330
| 0
|
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __lowerCamelCase ( __lowerCAmelCase : List[str] ) -> Any:
snake_case = VideoMAEConfig()
set_architecture_configs(__lowerCAmelCase , __lowerCAmelCase )
if "finetuned" not in model_name:
snake_case = False
if "finetuned" in model_name:
snake_case = """huggingface/label-files"""
if "kinetics" in model_name:
snake_case = 4_00
snake_case = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
snake_case = 1_74
snake_case = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
snake_case = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
snake_case = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
return config
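# Architecture presets matching the released VideoMAE checkpoints: each branch
# sets the encoder width/depth plus the lighter decoder used only during
# pre-training ("base" keeps the config defaults).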
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
if "small" in model_name:
snake_case = 3_84
snake_case = 15_36
snake_case = 12
snake_case = 16
snake_case = 12
snake_case = 3
snake_case = 1_92
snake_case = 7_68
elif "large" in model_name:
snake_case = 10_24
snake_case = 40_96
snake_case = 24
snake_case = 16
snake_case = 12
snake_case = 8
snake_case = 5_12
snake_case = 20_48
elif "huge" in model_name:
snake_case = 12_80
snake_case = 51_20
snake_case = 32
snake_case = 16
snake_case = 12
snake_case = 8
snake_case = 6_40
snake_case = 25_60
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def __lowerCamelCase ( __lowerCAmelCase : str ) -> Union[str, Any]:
if "encoder." in name:
snake_case = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
snake_case = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
snake_case = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
snake_case = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
snake_case = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
snake_case = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
snake_case = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
snake_case = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
snake_case = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
snake_case = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
snake_case = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
snake_case = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
snake_case = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
snake_case = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
snake_case = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
snake_case = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
snake_case = name.replace("""head""" , """classifier""" )
return name
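# The original checkpoints store attention as one fused "qkv" matrix; split it
# into equal thirds for the separate query/key/value projections expected by
# the HF implementation (decoder and encoder use different hidden sizes).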
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : str ) -> List[str]:
for key in orig_state_dict.copy().keys():
snake_case = orig_state_dict.pop(__lowerCAmelCase )
if key.startswith("""encoder.""" ):
snake_case = key.replace("""encoder.""" , """""" )
if "qkv" in key:
snake_case = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
snake_case = config.decoder_hidden_size
snake_case = int(key_split[2] )
snake_case = """decoder.decoder_layers."""
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[dim : dim * 2, :]
snake_case = val[-dim:, :]
else:
snake_case = config.hidden_size
snake_case = int(key_split[1] )
snake_case = """videomae.encoder.layer."""
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[dim : dim * 2, :]
snake_case = val[-dim:, :]
else:
snake_case = val
return orig_state_dict
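# Fetch a short reference clip (frames stored as a .npy array on the Hub)
# used to sanity-check the converted weights.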
def __lowerCamelCase ( ) -> int:
snake_case = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
snake_case = np.load(__lowerCAmelCase )
return list(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
snake_case = get_videomae_config(__lowerCAmelCase )
if "finetuned" in model_name:
snake_case = VideoMAEForVideoClassification(__lowerCAmelCase )
else:
snake_case = VideoMAEForPreTraining(__lowerCAmelCase )
# download original checkpoint, hosted on Google Drive
snake_case = """pytorch_model.bin"""
gdown.cached_download(__lowerCAmelCase , __lowerCAmelCase , quiet=__lowerCAmelCase )
snake_case = torch.load(__lowerCAmelCase , map_location="""cpu""" )
if "model" in files:
snake_case = files["""model"""]
else:
snake_case = files["""module"""]
snake_case = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# verify model on basic input
snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
snake_case = prepare_video()
snake_case = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
if "finetuned" not in model_name:
snake_case = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
snake_case = torch.load(__lowerCAmelCase )
snake_case = model(**__lowerCAmelCase )
snake_case = outputs.logits
snake_case = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case = torch.Size([1, 1_74] )
snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case = torch.Size([1, 1_74] )
snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case = torch.Size([1, 1_74] )
snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case = outputs.loss
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(__lowerCAmelCase , organization="""nielsr""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 3
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
    snake_case_ = KandinskyV22ControlnetImg2ImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : Dict )-> str:
return 32
@property
def lowerCAmelCase ( self : int )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Any:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : str )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 8,
            # Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        snake_case = UNet2DConditionModel(**__snake_case )
return model
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : str )-> List[str]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : int )-> Dict:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case = Image.fromarray(np.uint8(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = init_image.resize((5_12, 5_12) )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0
snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case = """A robot, 4k photo"""
snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
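# --- Hedged usage sketch (not part of the test suite above) ---
# A minimal end-to-end version of the flow the slow test exercises. The class
# names below are this module's own (they correspond to diffusers'
# KandinskyV22PriorEmb2EmbPipeline and KandinskyV22ControlnetImg2ImgPipeline);
# the checkpoints match the slow test, while the device, inputs and
# hyper-parameters are illustrative.
def _kandinsky_controlnet_img2img_sketch(init_image, hint, prompt="A robot, 4k photo", device="cuda"):
    # Stage 1: the prior maps (prompt, init image) to CLIP image embeddings.
    pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.floataa
    ).to(device)
    image_embeds, negative_image_embeds = pipe_prior(
        prompt, image=init_image, strength=0.85, negative_prompt=""
    ).to_tuple()
    # Stage 2: the controlnet img2img decoder denoises the init image,
    # conditioned on those embeddings plus the hint (a depth map in [0, 1]
    # with shape 1 x 3 x H x W, as prepared in the slow test above).
    pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.floataa
    ).to(device)
    return pipeline(
        image=init_image,
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        hint=hint,
        num_inference_steps=100,
        height=512,
        width=512,
        strength=0.5,
        output_type="np",
    ).images[0]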
| 3
| 1
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : Tuple = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
UpperCAmelCase : int = {
"allenai/led-base-16384": 1_6384,
}
class SCREAMING_SNAKE_CASE__ ( __lowercase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LEDTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : Tuple , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Union[str, Any]="replace" , lowerCAmelCase_ : Any="<s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : Tuple="<unk>" , lowerCAmelCase_ : str="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : str=True , **lowerCAmelCase_ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , snake_case_) != add_prefix_space:
lowercase_ = getattr(snake_case_ , pre_tok_state.pop("""type"""))
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**snake_case_)
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = '''post_processor'''
lowercase_ = getattr(self.backend_tokenizer , snake_case_ , snake_case_)
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["""sep"""])
if "cls" in state:
lowercase_ = tuple(state["""cls"""])
lowercase_ = False
if state.get("""add_prefix_space""" , snake_case_) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("""trim_offsets""" , snake_case_) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(snake_case_ , state.pop("""type"""))
lowercase_ = component_class(**snake_case_)
setattr(self.backend_tokenizer , snake_case_ , snake_case_)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Any):
"""simple docstring"""
lowercase_ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_) if isinstance(snake_case_ , snake_case_) else value
lowercase_ = value
def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = kwargs.get("""is_split_into_words""" , snake_case_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._batch_encode_plus(*snake_case_ , **snake_case_)
def _UpperCAmelCase ( self : List[Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = kwargs.get("""is_split_into_words""" , snake_case_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._encode_plus(*snake_case_ , **snake_case_)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
lowercase_ = self._tokenizer.model.save(snake_case_ , name=snake_case_)
return tuple(snake_case_)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=None):
"""simple docstring"""
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
lowercase_ = super()._pad(
encoded_inputs=snake_case_ , max_length=snake_case_ , padding_strategy=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , )
# Load from model defaults
if return_attention_mask is None:
lowercase_ = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(snake_case_)
if needs_to_be_padded:
lowercase_ = len(snake_case_) - len(encoded_inputs["""global_attention_mask"""])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase_ = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase_ = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side))
return encoded_inputs
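# --- Hedged usage sketch (not part of the original file) ---
# Demonstrates the `_pad` override above: `global_attention_mask` is extended
# with -1 ("local attention") so it stays aligned with the padded `input_ids`.
# Assumes the `allenai/led-base-16384` checkpoint is reachable; the class name
# is this module's (obfuscated) LED fast tokenizer defined above.
def _led_global_attention_pad_sketch():
    tokenizer = SCREAMING_SNAKE_CASE__.from_pretrained("allenai/led-base-16384")
    encoding = tokenizer("Summarize this very long report.")
    # Global attention on the first (<s>) token only, local attention elsewhere.
    encoding["global_attention_mask"] = [1] + [0] * (len(encoding["input_ids"]) - 1)
    padded = tokenizer.pad(encoding, padding="max_length", max_length=16)
    # padding_side is "right" by default, so the mask now ends in -1 entries.
    return padded["global_attention_mask"]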
| 136
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ =logging.get_logger(__name__)
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict ):
__a : List[str] = os.path.abspath(lowerCAmelCase__ )
logger.info(f"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
__a : Tuple = tf.train.list_variables(lowerCAmelCase__ )
__a : Optional[Any] = []
__a : Union[str, Any] = []
__a : str = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__a : Any = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(f"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
__a : Any = name[1:]
# figure out how many levels deep the name is
__a : List[Any] = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(lowerCAmelCase__ )
# read data
__a : Tuple = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ )
names.append('''/'''.join(lowerCAmelCase__ ) )
arrays.append(lowerCAmelCase__ )
logger.info(f"Read a total of {len(lowerCAmelCase__ ):,} layers" )
# Sanity check
if len(set(lowerCAmelCase__ ) ) != 1:
raise ValueError(f"Found layer names with different depths (layer depth {list(set(lowerCAmelCase__ ) )})" )
__a : int = list(set(lowerCAmelCase__ ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__a : int = full_name.split('''/''' )
__a : Tuple = model
__a : Dict = []
for i, m_name in enumerate(lowerCAmelCase__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
__a : Union[str, Any] = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''embeddings''' )
__a : List[str] = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
__a : Dict = getattr(lowerCAmelCase__ , '''encoder''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''layer''' )
__a : Any = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
__a : Any = getattr(lowerCAmelCase__ , '''pooler''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
__a : int = getattr(lowerCAmelCase__ , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
__a : List[str] = getattr(lowerCAmelCase__ , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''token_type_embeddings''' )
else:
raise ValueError(f"Unknown embedding layer with name {full_name}" )
trace.append('''weight''' )
__a : Tuple = getattr(lowerCAmelCase__ , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''attention''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
__a : int = getattr(lowerCAmelCase__ , '''attention''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''output''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''attention''' )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''output''' )
__a : Any = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
__a : Tuple = getattr(lowerCAmelCase__ , '''output''' )
__a : str = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.extend(['''output''', '''LayerNorm'''] )
__a : int = getattr(lowerCAmelCase__ , '''output''' )
__a : str = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''value''' )
elif m_name == "_intermediate_dense":
# intermediate dense (feed-forward)
trace.extend(['''intermediate''', '''dense'''] )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''intermediate''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
__a : int = getattr(lowerCAmelCase__ , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
__a : Dict = getattr(lowerCAmelCase__ , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''weight''' )
else:
logger.warning(f"Ignored {m_name}" )
# for certain layers reshape is necessary
__a : List[str] = '''.'''.join(lowerCAmelCase__ )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , lowerCAmelCase__ ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , lowerCAmelCase__ ):
__a : str = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__a : Optional[Any] = array.transpose()
if pointer.shape == array.shape:
__a : str = torch.from_numpy(lowerCAmelCase__ )
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}" )
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ):
# Instantiate model
logger.info(f"Loading model based on config from {config_path}..." )
__a : Dict = BertConfig.from_json_file(lowerCAmelCase__ )
__a : int = BertModel(lowerCAmelCase__ )
# Load weights from checkpoint
logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." )
load_tfa_weights_in_bert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
lowercase__ =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
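# Example invocation (illustrative paths):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
#       --bert_config_file ./tf2_model/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin
# The script only handles embedding/encoder (and pooler) weights; checkpoints
# carrying MLM/NSP heads fail the layer-depth sanity check above by design.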
| 216
| 0
|
import random
from .binary_exp_mod import bin_exp_mod
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=1_000 ) -> bool:
"""simple docstring"""
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
snake_case_ : int = n - 1
snake_case_ : Union[str, Any] = 0
while d % 2 == 0:
d //= 2
exp += 1
# n - 1 = d * (2**exp)
snake_case_ : List[Any] = 0
while count < prec:
snake_case_ : Tuple = random.randint(2 , n - 1 )
snake_case_ : List[Any] = bin_exp_mod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if b != 1:
snake_case_ : Any = True
for _ in range(_UpperCamelCase ):
if b == n - 1:
snake_case_ : Tuple = False
break
snake_case_ : List[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
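# Hedged examples (the function keeps this file's obfuscated name):
#   lowerCamelCase_(97)  -> True   (97 is prime)
#   lowerCamelCase_(561) -> False  (Carmichael number: Fermat-style tests are
#                                   fooled by it, Miller-Rabin witnesses are not)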
if __name__ == "__main__":
lowerCAmelCase_ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 279
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = ['''image_processor''', '''tokenizer''']
lowerCamelCase_ : List[Any] = '''BlipImageProcessor'''
lowerCamelCase_ : Union[str, Any] = '''AutoTokenizer'''
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
super().__init__(__magic_name__ , __magic_name__ )
# add QFormer tokenizer
snake_case_ : Optional[Any] = qformer_tokenizer
def __call__(self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = True , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = 0 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = True , __magic_name__ = None , **__magic_name__ , ) -> BatchFeature:
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
snake_case_ : Tuple = BatchFeature()
if text is not None:
snake_case_ : Tuple = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
encoding.update(__magic_name__ )
snake_case_ : Optional[Any] = self.qformer_tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
snake_case_ : Optional[int] = qformer_text_encoding.pop('''input_ids''' )
snake_case_ : Tuple = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
snake_case_ : Any = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
encoding.update(__magic_name__ )
return encoding
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase (self , __magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
snake_case_ : Any = os.path.join(__magic_name__ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(__magic_name__ )
return super().save_pretrained(__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = AutoTokenizer.from_pretrained(__magic_name__ , subfolder='''qformer_tokenizer''' )
snake_case_ : str = cls._get_arguments_from_pretrained(__magic_name__ , **__magic_name__ )
args.append(__magic_name__ )
return cls(*__magic_name__ )
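# --- Hedged usage note (the class above corresponds to InstructBlipProcessor) ---
# `save_pretrained` writes the Q-Former tokenizer into a "qformer_tokenizer"
# subfolder next to the main processor files, and `from_pretrained` reloads it
# from that subfolder before instantiating the class. Checkpoint name illustrative:
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   processor.save_pretrained("./instructblip-processor")  # creates qformer_tokenizer/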
| 279
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
a_ = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->Union[str, Any]:
self.resolver.convert_models(["heb-eng"])
@slow
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ , a_ = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__UpperCAmelCase)
assert mmeta["long_pair"] == "heb-eng"
| 243
|
"""simple docstring"""
import math
import os
import sys
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = ""
try:
with open(UpperCAmelCase , "rb" ) as binary_file:
a_ = binary_file.read()
for dat in data:
a_ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->None:
"""simple docstring"""
lexicon.pop(UpperCAmelCase )
a_ = last_match_id
if math.loga(UpperCAmelCase ).is_integer():
for curr_key in lexicon:
a_ = "0" + lexicon[curr_key]
a_ = bin(UpperCAmelCase )[2:]
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = {"0": "0", "1": "1"}
a_ , a_ = "", ""
a_ = len(UpperCAmelCase )
for i in range(len(UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
a_ = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
index += 1
a_ = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
a_ = lexicon[curr_string]
result += last_match_id
return result
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = os.path.getsize(UpperCAmelCase )
a_ = bin(UpperCAmelCase )[2:]
a_ = len(UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->None:
"""simple docstring"""
a_ = 8
try:
with open(UpperCAmelCase , "wb" ) as opened_file:
a_ = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCAmelCase ) , UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(UpperCAmelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->None:
"""simple docstring"""
a_ = read_file_binary(UpperCAmelCase )
a_ = compress_data(UpperCAmelCase )
a_ = add_file_length(UpperCAmelCase , UpperCAmelCase )
write_file_binary(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
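# Example invocation (illustrative paths):
#   python lzw_compress.py ./input.txt ./compressed.lzw
# The output begins with a zero-padded binary header holding the source file's
# byte length (prepended by `add_file_length` above), followed by the LZW code
# stream padded out to whole bytes by `write_file_binary`.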
| 243
| 1
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def UpperCamelCase ( a , a ) -> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCamelCase ( a ) -> list[str]:
'''simple docstring'''
__magic_name__ = []
__magic_name__ = 11
__magic_name__ = int('''1''' + '''0''' * digit_len )
for num in range(a , a ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a , a ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
__magic_name__ = 10
return solutions
def UpperCamelCase ( a = 2 ) -> int:
'''simple docstring'''
__magic_name__ = 1.0
for fraction in fraction_list(a ):
__magic_name__ = Fraction(a )
result *= frac.denominator / frac.numerator
return int(a )
if __name__ == "__main__":
print(solution())
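# For the default 2-digit case, the four non-trivial digit-cancelling fractions
# are 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so
# solution() returns 100 (Project Euler problem 33).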
| 359
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE :List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__SCREAMING_SNAKE_CASE :Any = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def snake_case__ ( self : Tuple , a__ : Tuple , a__ : int , a__ : int ):
__magic_name__ = TextaTextGenerationPipeline(model=a__ , tokenizer=a__ )
return generator, ["Something to write", "Something else"]
def snake_case__ ( self : List[str] , a__ : List[Any] , a__ : List[str] ):
__magic_name__ = generator('''Something there''' )
self.assertEqual(a__ , [{'''generated_text''': ANY(a__ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
__magic_name__ = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{'''generated_text''': ANY(a__ )}, {'''generated_text''': ANY(a__ )}],
[{'''generated_text''': ANY(a__ )}, {'''generated_text''': ANY(a__ )}],
] , )
__magic_name__ = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{'''generated_text''': ANY(a__ )}, {'''generated_text''': ANY(a__ )}],
[{'''generated_text''': ANY(a__ )}, {'''generated_text''': ANY(a__ )}],
] , )
with self.assertRaises(a__ ):
generator(4 )
@require_torch
def snake_case__ ( self : Any ):
__magic_name__ = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
__magic_name__ = generator('''Something there''' , do_sample=a__ )
self.assertEqual(a__ , [{'''generated_text''': ''''''}] )
__magic_name__ = 3
__magic_name__ = generator(
'''Something there''' , num_return_sequences=a__ , num_beams=a__ , )
__magic_name__ = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(a__ , a__ )
__magic_name__ = generator('''This is a test''' , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ )
self.assertEqual(
a__ , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
__magic_name__ = generator.model.config.eos_token_id
__magic_name__ = '''<pad>'''
__magic_name__ = generator(
['''This is a test''', '''This is a second test'''] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , )
self.assertEqual(
a__ , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def snake_case__ ( self : int ):
__magic_name__ = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
__magic_name__ = generator('''Something there''' , do_sample=a__ )
self.assertEqual(a__ , [{'''generated_text''': ''''''}] )
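# --- Hedged usage sketch of the pipeline under test ---
# The tiny random checkpoint produces meaningless text; do_sample=False makes
# the decoding deterministic, which is what the assertions above rely on.
#   from transformers import pipeline
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   generator("Something there", do_sample=False)  # -> [{"generated_text": ...}]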
| 98
| 0
|
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCamelCase__ = '\\n Text data.\n Second line of data.'
lowerCamelCase__ = 'file'
@pytest.fixture(scope='session' )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[str] = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '''.zstd''')
__lowerCAmelCase : Union[str, Any] = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture
def __lowerCAmelCase (_UpperCamelCase ):
with open(os.path.join(tmpfs.local_root_dir , lowercase__ ) , 'w' ) as f:
f.write(lowercase__ )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[int] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__lowerCAmelCase : str = input_paths[compression_format]
__lowerCAmelCase : Tuple = tmp_path / '''cache'''
__lowerCAmelCase : Any = DownloadConfig(cache_dir=lowercase__ , extract_compressed_file=lowercase__ )
__lowerCAmelCase : Union[str, Any] = cached_path(lowercase__ , download_config=lowercase__ )
with open(lowercase__ ) as f:
__lowerCAmelCase : Any = f.read()
with open(lowercase__ ) as f:
__lowerCAmelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[str] = '''custom_cache'''
__lowerCAmelCase : Optional[Any] = '''custom_extracted_dir'''
__lowerCAmelCase : int = tmp_path / '''custom_extracted_path'''
if default_extracted:
__lowerCAmelCase : List[Any] = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , lowercase__ )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(lowercase__ ) )
__lowerCAmelCase : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__lowerCAmelCase : int = xz_file
__lowerCAmelCase : int = (
DownloadConfig(extract_compressed_file=lowercase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowercase__ )
)
__lowerCAmelCase : Optional[int] = cached_path(lowercase__ , download_config=lowercase__ )
assert Path(lowercase__ ).parent.parts[-2:] == expected
def __lowerCAmelCase (_UpperCamelCase ):
# absolute path
__lowerCAmelCase : Dict = str(Path(lowercase__ ).resolve() )
assert cached_path(lowercase__ ) == text_file
# relative path
__lowerCAmelCase : str = str(Path(lowercase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowercase__ ) == text_file
def __lowerCAmelCase (_UpperCamelCase ):
# absolute path
__lowerCAmelCase : int = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(lowercase__ ):
cached_path(lowercase__ )
# relative path
__lowerCAmelCase : Dict = '''./__missing_file__.txt'''
with pytest.raises(lowercase__ ):
cached_path(lowercase__ )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = get_from_cache(F"tmp://{tmpfs_file}" )
with open(lowercase__ ) as f:
__lowerCAmelCase : Tuple = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowercase__ )
def __lowerCAmelCase ():
with pytest.raises(lowercase__ ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowercase__ )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / '''file.html'''
with pytest.raises(lowercase__ ):
http_get('https://huggingface.co' , temp_file=lowercase__ )
with pytest.raises(lowercase__ ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowercase__ )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = tmp_path_factory.mktemp('data' ) / '''file.html'''
with pytest.raises(lowercase__ ):
ftp_get('ftp://huggingface.co' , temp_file=lowercase__ )
with pytest.raises(lowercase__ ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowercase__ )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = tmp_path_factory.mktemp('data' ) / '''file.html'''
with pytest.raises(lowercase__ ):
fsspec_get('s3://huggingface.co' , temp_file=lowercase__ )
with pytest.raises(lowercase__ ):
fsspec_head('s3://huggingface.co' )
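# Note on the patched tests above: with `datasets.config.HF_DATASETS_OFFLINE`
# enabled, every remote-access helper (`cached_path`, `http_get`/`http_head`,
# `ftp_get`/`ftp_head`, `fsspec_get`/`fsspec_head`) raises OfflineModeIsEnabled
# before any network call is attempted.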
| 86
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int=13 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :List[Any]=99 , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :Any=5 , lowerCAmelCase__ :List[str]=4 , lowerCAmelCase__ :int=37 , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Optional[Any]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :Tuple=0.02 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Tuple=4 , lowerCAmelCase__ :int=None , ) -> int:
__SCREAMING_SNAKE_CASE : Dict = parent
__SCREAMING_SNAKE_CASE : Any = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
__SCREAMING_SNAKE_CASE : Optional[Any] = is_training
__SCREAMING_SNAKE_CASE : int = use_token_type_ids
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
__SCREAMING_SNAKE_CASE : Tuple = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
__SCREAMING_SNAKE_CASE : Union[str, Any] = scope
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.vocab_size - 1
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Optional[int] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , *lowerCAmelCase__ :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , *lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = OpenAIGPTLMHeadModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , *lowerCAmelCase__ :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTDoubleHeadsModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , *lowerCAmelCase__ :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : List[Any] = OpenAIGPTForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    head_mask,
    token_type_ids,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
__SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : str = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE__ : str = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Tuple:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __magic_name__( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :int=False ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__SCREAMING_SNAKE_CASE : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Tuple = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : Dict = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = OpenAIGPTModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37 )
def __magic_name__( self :Any ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase__ )
def __magic_name__( self :int ) -> int:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :Any ) -> List[Any]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Dict = OpenAIGPTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=lowerCAmelCase__ ) # the president is
__SCREAMING_SNAKE_CASE : Dict = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__SCREAMING_SNAKE_CASE : Dict = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ )
self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase__ )
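# Hedged note: the expected ids above assume greedy decoding (do_sample=False)
# with the "openai-gpt" checkpoint, so generation is deterministic and the
# exact-match assertion is stable across runs on the same weights.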
| 9
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _snake_case ( ):
_lowerCamelCase : Optional[int] = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
_lowerCamelCase : List[str] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
return image
def _snake_case ( lowercase__ ):
_lowerCamelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : int = dct.pop(lowercase__ )
_lowerCamelCase : Optional[int] = val
def _snake_case ( lowercase__ , lowercase__ ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCamelCase : Optional[int] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
_lowerCamelCase : int = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
_lowerCamelCase : Tuple = torch.cat((q_bias, torch.zeros_like(lowercase__ , requires_grad=lowercase__ ), v_bias) )
_lowerCamelCase : List[Any] = qkv_bias
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[str] = 364 if 'coco' in model_name else 224
_lowerCamelCase : int = InstructBlipVisionConfig(image_size=lowercase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_lowerCamelCase : Union[str, Any] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCamelCase : Dict = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_lowerCamelCase : str = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=32001 ).to_dict()
elif "vicuna-13b" in model_name:
_lowerCamelCase : str = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=32001 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_lowerCamelCase : Optional[int] = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
_lowerCamelCase : Optional[int] = InstructBlipConfig(vision_config=lowercase__ , text_config=lowercase__ , qformer_config=lowercase__ )
return config, image_size
@torch.no_grad()
def _snake_case ( lowercase__ , lowercase__=None , lowercase__=False ):
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
_lowerCamelCase : List[Any] = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_lowerCamelCase : int = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
config, image_size = get_blipa_config(lowercase__ )
_lowerCamelCase : Dict = InstructBlipForConditionalGeneration(lowercase__ ).eval()
_lowerCamelCase : Optional[Any] = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
_lowerCamelCase, _lowerCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
_lowerCamelCase : Dict = 'cuda:1' if torch.cuda.is_available() else 'cpu'
_lowerCamelCase : int = 'cuda:2' if torch.cuda.is_available() else 'cpu'
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase = load_model_and_preprocess(
name=lowercase__ , model_type=lowercase__ , is_eval=lowercase__ , device=lowercase__ )
original_model.eval()
print('Done!' )
# update state dict keys
_lowerCamelCase : List[Any] = original_model.state_dict()
_lowerCamelCase : str = create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCamelCase : List[str] = state_dict.pop(lowercase__ )
if key.startswith('Qformer.bert' ):
_lowerCamelCase : Optional[Any] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
_lowerCamelCase : Optional[int] = key.replace('self' , 'attention' )
if "llm_proj" in key:
_lowerCamelCase : Dict = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
_lowerCamelCase : List[Any] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
_lowerCamelCase : int = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
_lowerCamelCase : Optional[Any] = key.replace('t5' , 'language' )
_lowerCamelCase : Optional[int] = val
# read in qv biases
read_in_q_v_bias(lowercase__ , lowercase__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(lowercase__ , strict=lowercase__ )
_lowerCamelCase : Any = load_demo_image()
_lowerCamelCase : str = 'What is unusual about this image?'
# create processor
_lowerCamelCase : List[str] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowercase__ , image_std=lowercase__ )
_lowerCamelCase : List[Any] = InstructBlipProcessor(
image_processor=lowercase__ , tokenizer=lowercase__ , qformer_tokenizer=lowercase__ , )
_lowerCamelCase : List[str] = processor(images=lowercase__ , text=lowercase__ , return_tensors='pt' ).to(lowercase__ )
# make sure processor creates exact same pixel values
_lowerCamelCase : List[Any] = vis_processors['eval'](lowercase__ ).unsqueeze(0 ).to(lowercase__ )
_lowerCamelCase : Any = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , lowercase__ )
original_model.to(lowercase__ )
hf_model.to(lowercase__ )
with torch.no_grad():
if "vicuna" in model_name:
_lowerCamelCase : List[Any] = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
_lowerCamelCase : List[str] = hf_model(**lowercase__ ).logits
else:
_lowerCamelCase : Dict = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
_lowerCamelCase : Tuple = tokenizer('\n' , return_tensors='pt' ).input_ids.to(lowercase__ )
_lowerCamelCase : List[str] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
_lowerCamelCase : Dict = hf_model(**lowercase__ , labels=lowercase__ ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_lowerCamelCase : str = 1E-4 if 'vicuna' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , lowercase__ , atol=lowercase__ )
print('Looks ok!' )
print('Generating with original model...' )
_lowerCamelCase : str = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
_lowerCamelCase : Optional[int] = hf_model.generate(
**lowercase__ , do_sample=lowercase__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_lowerCamelCase : Tuple = 2
print('Original generation:' , lowercase__ )
_lowerCamelCase : Tuple = processor.batch_decode(lowercase__ , skip_special_tokens=lowercase__ )
_lowerCamelCase : List[Any] = [text.strip() for text in output_text]
print('HF generation:' , lowercase__ )
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
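# Example invocation (script name and output path below are illustrative):
#   python convert_instructblip_checkpoint.py --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl --push_to_hub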
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
lowercase__ = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
lowercase__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from typing import Any
def _snake_case ( input_list ):
    if not input_list:
        return []
    counts = [input_list.count(value ) for value in input_list]
    max_count = max(counts )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts ) if count == max_count} )
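# Usage sketch for the mode function above (values are illustrative):
assert _snake_case([1, 2, 2, 3, 3] ) == [2, 3]
assert _snake_case([] ) == []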
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config( model_name ):
'''simple docstring'''
    config = VideoMAEConfig()
    set_architecture_configs(model_name , config )
if "finetuned" not in model_name:
A : int = False
if "finetuned" in model_name:
A : List[Any] = '''huggingface/label-files'''
if "kinetics" in model_name:
A : int = 400
A : str = '''kinetics400-id2label.json'''
elif "ssv2" in model_name:
A : Dict = 174
A : Dict = '''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
A : Optional[int] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) )
A : int = {int(snake_case__ ): v for k, v in idalabel.items()}
A : Union[str, Any] = idalabel
A : Tuple = {v: k for k, v in idalabel.items()}
return config
def set_architecture_configs( model_name , config ):
'''simple docstring'''
if "small" in model_name:
A : int = 384
A : Tuple = 1536
A : Optional[Any] = 12
A : List[str] = 16
A : Optional[Any] = 12
A : Optional[Any] = 3
A : List[Any] = 192
A : List[str] = 768
elif "large" in model_name:
A : Optional[int] = 1024
A : Optional[Any] = 4096
A : Tuple = 24
A : List[Any] = 16
A : Tuple = 12
A : Union[str, Any] = 8
A : Optional[int] = 512
A : str = 2048
elif "huge" in model_name:
A : List[Any] = 1280
A : Tuple = 5120
A : Tuple = 32
A : Optional[Any] = 16
A : Dict = 12
A : List[str] = 8
A : str = 640
A : Optional[int] = 2560
elif "base" not in model_name:
raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def rename_key( name ):
'''simple docstring'''
if "encoder." in name:
A : List[str] = name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
A : Any = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
A : Dict = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
A : Tuple = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
A : List[str] = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
A : int = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
A : List[Any] = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
A : Union[str, Any] = name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
A : Tuple = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
A : str = name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
A : Any = name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
A : Dict = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A : List[str] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A : List[str] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A : Dict = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
A : Tuple = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
A : int = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
A : List[str] = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
A : List[str] = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
A : str = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
A : Tuple = name.replace('''head''' , '''classifier''' )
return name
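# A minimal, self-contained sketch of the renaming logic above, reproducing just two of
# the substitutions on a hypothetical checkpoint key:
def _rename_sketch( name ):
    name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    name = name.replace('''blocks''' , '''videomae.encoder.layer''' )
    return name
assert _rename_sketch('''blocks.0.attn.proj.weight''' ) == '''videomae.encoder.layer.0.attention.output.dense.weight'''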
def convert_state_dict( orig_state_dict , config ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if key.startswith('''encoder.''' ):
            key = key.replace('''encoder.''' , '''''' )
if "qkv" in key:
            key_split = key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
                dim = config.decoder_hidden_size
A : List[str] = int(key_split[2] )
A : Any = '''decoder.decoder_layers.'''
if "weight" in key:
A : Optional[Any] = val[:dim, :]
A : Union[str, Any] = val[dim : dim * 2, :]
A : List[str] = val[-dim:, :]
else:
                dim = config.hidden_size
A : str = int(key_split[1] )
A : Dict = '''videomae.encoder.layer.'''
if "weight" in key:
A : str = val[:dim, :]
A : Tuple = val[dim : dim * 2, :]
A : List[str] = val[-dim:, :]
else:
A : Union[str, Any] = val
return orig_state_dict
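# Minimal sketch of the fused-qkv split performed above, on a random tensor
# (the hidden size is illustrative, not tied to any real checkpoint):
_sketch_dim = 8
_sketch_qkv = torch.randn(3 * _sketch_dim , _sketch_dim )  # fused [q; k; v] projection weight
_sketch_q = _sketch_qkv[:_sketch_dim, :]
_sketch_k = _sketch_qkv[_sketch_dim : _sketch_dim * 2, :]
_sketch_v = _sketch_qkv[-_sketch_dim:, :]
assert _sketch_q.shape == _sketch_k.shape == _sketch_v.shape == (_sketch_dim, _sketch_dim)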
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Dict = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
A : Any = np.load(snake_case__ )
return list(snake_case__ )
def convert_videomae_checkpoint( checkpoint_url , pytorch_dump_folder_path , model_name , push_to_hub ):
'''simple docstring'''
    config = get_videomae_config(model_name )
if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config )
else:
        model = VideoMAEForPreTraining(config )
# download original checkpoint, hosted on Google Drive
    output = '''pytorch_model.bin'''
    gdown.cached_download(checkpoint_url , output , quiet=False )
    files = torch.load(output , map_location='''cpu''' )
if "model" in files:
A : Optional[Any] = files['''model''']
else:
A : Tuple = files['''module''']
A : Dict = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video , return_tensors='''pt''' )
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        inputs['''bool_masked_pos'''] = torch.load(local_path )
    outputs = model(**inputs )
    logits = outputs.logits
    model_names = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
A : int = torch.Size([1, 400] )
A : List[Any] = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
A : Any = torch.Size([1, 174] )
A : Union[str, Any] = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
A : List[str] = torch.Size([1, 1408, 1536] )
A : Union[str, Any] = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
A : List[str] = torch.Size([1, 1408, 1536] )
A : Optional[Any] = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
A : List[str] = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
A : List[str] = torch.Size([1, 1408, 1536] )
A : Any = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
A : Any = torch.Size([1, 400] )
A : str = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
A : Optional[int] = torch.Size([1, 400] )
A : str = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
A : List[Any] = torch.Size([1, 400] )
A : List[Any] = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
A : Union[str, Any] = torch.Size([1, 400] )
A : List[Any] = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
A : Any = torch.Size([1, 1408, 1536] )
A : List[str] = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
A : Optional[Any] = torch.Size([1, 174] )
A : Optional[Any] = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
A : str = torch.Size([1, 1408, 1536] )
A : Union[str, Any] = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
A : Optional[int] = torch.Size([1, 174] )
A : str = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(F'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1E-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1E-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
A : List[str] = outputs.loss
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
        model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing to the hub...''' )
        model.push_to_hub(model_name , organization='''nielsr''' )
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowercase : List[Any] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase : Union[str, Any] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap( checkpoint_path , enable_fusion=False ):
    '''simple docstring'''
    model, model_cfg = create_model(
        '''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def rename_state_dict( state_dict ):
'''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = R'''.*sequential.(\d+).*'''
    text_projection_pattern = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
A : Any = key.replace(snake_case__ , snake_case__ )
        if re.match(sequential_layers_pattern , key ):
# replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(sequential_layer )//3}.linear.' )
        elif re.match(text_projection_pattern , key ):
            projection_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(F'_projection.{projection_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
        if "audio" in key and "qkv" in key:
# split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace('''qkv''' , '''query''' )] = query_layer
            model_state_dict[key.replace('''qkv''' , '''key''' )] = key_layer
            model_state_dict[key.replace('''qkv''' , '''value''' )] = value_layer
        else:
            model_state_dict[key] = value
return model_state_dict
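# Sketch of the "sequential.N" -> "layers.N // 3" remapping used above, on a hypothetical
# key; the // 3 presumably skips the non-linear entries of the original nn.Sequential:
_sketch_key = '''text_projection.sequential.3.weight'''
_sketch_match = re.match(R'''.*sequential.(\d+).*''' , _sketch_key )
if _sketch_match:
    _sketch_layer = int(_sketch_match.group(1 ) )
    _sketch_key = _sketch_key.replace(F'sequential.{_sketch_layer}.' , F'layers.{_sketch_layer // 3}.linear.' )
assert _sketch_key == '''text_projection.layers.1.linear.weight'''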
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
'''simple docstring'''
    clap_model, model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
# ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowercase : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=32 * 8 , __UpperCAmelCase=32 * 8 , __UpperCAmelCase=4 , __UpperCAmelCase=64 , ) -> Optional[int]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =is_training
_lowerCAmelCase =use_auxiliary_loss
_lowerCAmelCase =num_queries
_lowerCAmelCase =num_channels
_lowerCAmelCase =min_size
_lowerCAmelCase =max_size
_lowerCAmelCase =num_labels
_lowerCAmelCase =hidden_dim
_lowerCAmelCase =hidden_dim
def _lowerCAmelCase ( self ) -> Tuple:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            _A )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_A )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_A ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=_A ) > 0.5).long()
        config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_lowerCAmelCase =self.num_queries
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =[1, 1, 1, 1]
_lowerCAmelCase =self.num_channels
_lowerCAmelCase =64
_lowerCAmelCase =1_28
_lowerCAmelCase =self.hidden_dim
_lowerCAmelCase =self.hidden_dim
_lowerCAmelCase =self.hidden_dim
return config
def _lowerCAmelCase ( self ) -> Any:
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_lowerCAmelCase =output.encoder_hidden_states
_lowerCAmelCase =output.pixel_decoder_hidden_states
_lowerCAmelCase =output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(_A ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_A ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_A ) , config.decoder_layers )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> List[str]:
with torch.no_grad():
_lowerCAmelCase =MaskaFormerModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase =model(pixel_values=_A , pixel_mask=_A )
_lowerCAmelCase =model(_A , output_hidden_states=_A )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_A , _A )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =MaskaFormerForUniversalSegmentation(config=_A )
model.to(_A )
model.eval()
def comm_check_on_output(__UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCAmelCase =model(pixel_values=_A , pixel_mask=_A )
_lowerCAmelCase =model(_A )
comm_check_on_output(_A )
_lowerCAmelCase =model(
pixel_values=_A , pixel_mask=_A , mask_labels=_A , class_labels=_A )
comm_check_on_output(_A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =MaskaFormerModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=_A , has_text_modality=_A )
def _lowerCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_A , **_A , output_hidden_states=_A )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_A )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def _lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def _lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def _lowerCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def _lowerCAmelCase ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" )
def _lowerCAmelCase ( self ) -> Tuple:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(_A )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_lowerCAmelCase =MaskaFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =(self.model_tester.min_size,) * 2
_lowerCAmelCase ={
'pixel_values': torch.randn((2, 3, *size) , device=_A ),
'mask_labels': torch.randn((2, 10, *size) , device=_A ),
'class_labels': torch.zeros(2 , 10 , device=_A ).long(),
}
_lowerCAmelCase =self.model_tester.get_config()
_lowerCAmelCase =MaskaFormerForUniversalSegmentation(_A ).to(_A )
_lowerCAmelCase =model(**_A )
self.assertTrue(outputs.loss is not None )
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_A , **_A , output_hidden_states=_A )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(_A ).to(_A )
_lowerCAmelCase =model(**_A , output_attentions=_A )
self.assertTrue(outputs.attentions is not None )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
_lowerCAmelCase =self.all_model_classes[1]
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase =model_class(_A )
model.to(_A )
model.train()
_lowerCAmelCase =model(_A , mask_labels=_A , class_labels=_A ).loss
loss.backward()
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.all_model_classes[1]
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase =True
_lowerCAmelCase =True
_lowerCAmelCase =model_class(_A ).to(_A )
model.train()
_lowerCAmelCase =model(_A , mask_labels=_A , class_labels=_A )
_lowerCAmelCase =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCAmelCase =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_lowerCAmelCase =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCAmelCase =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
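        # retain_grad() is needed above because these are intermediate (non-leaf) tensors;
        # without it their .grad attribute would stay None after backward()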
__A = 1E-4
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@slow
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ) -> Any:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _lowerCAmelCase ( self ) -> Any:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_A )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(_A , return_tensors="""pt""" ).to(_A )
        inputs_shape = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_lowerCAmelCase =model(**_A )
_lowerCAmelCase =torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
_lowerCAmelCase =torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
_lowerCAmelCase =torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _A , atol=_A ) )
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_A ).eval()
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(_A , return_tensors="""pt""" ).to(_A )
        inputs_shape = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_lowerCAmelCase =model(**_A )
# masks_queries_logits
_lowerCAmelCase =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_lowerCAmelCase =[
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_lowerCAmelCase =torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) )
# class_queries_logits
_lowerCAmelCase =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_lowerCAmelCase =torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_A ).eval()
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
_lowerCAmelCase =inputs['pixel_values'].to(_A )
_lowerCAmelCase =[el.to(_A ) for el in inputs['mask_labels']]
_lowerCAmelCase =[el.to(_A ) for el in inputs['class_labels']]
with torch.no_grad():
_lowerCAmelCase =model(**_A )
self.assertTrue(outputs.loss is not None )
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCAmelCase_ = logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase :
def __init__(self , __magic_name__=False , __magic_name__=False , __magic_name__=6.0 , __magic_name__=None , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__="fp4" , __magic_name__=False , **__magic_name__ , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = load_in_abit
snake_case_ : Optional[int] = load_in_abit
snake_case_ : str = llm_inta_threshold
snake_case_ : List[str] = llm_inta_skip_modules
snake_case_ : Optional[int] = llm_inta_enable_fpaa_cpu_offload
snake_case_ : List[Any] = llm_inta_has_fpaa_weight
snake_case_ : int = bnb_abit_quant_type
snake_case_ : List[Any] = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
snake_case_ : Tuple = torch.floataa
elif isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : List[str] = getattr(__magic_name__ , __magic_name__ )
elif isinstance(__magic_name__ , torch.dtype ):
snake_case_ : Any = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def lowerCamelCase (self ) -> str:
'''simple docstring'''
if not isinstance(self.llm_inta_threshold , __magic_name__ ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __magic_name__ ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __magic_name__ ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , __magic_name__ ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , __magic_name__ ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , __magic_name__ ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return self.load_in_abit or self.load_in_abit
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : Tuple = cls(**__magic_name__ )
snake_case_ : Dict = []
for key, value in kwargs.items():
if hasattr(__magic_name__ , __magic_name__ ):
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
to_remove.append(__magic_name__ )
for key in to_remove:
kwargs.pop(__magic_name__ , __magic_name__ )
if return_unused_kwargs:
return config, kwargs
else:
return config
def lowerCamelCase (self , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
snake_case_ : Tuple = self.to_dict()
snake_case_ : List[Any] = json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + '''\n'''
writer.write(__magic_name__ )
def lowerCamelCase (self ) -> Dict[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = copy.deepcopy(self.__dict__ )
snake_case_ : Any = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__(self ) -> Optional[Any]:
'''simple docstring'''
return F'''{self.__class__.__name__} {self.to_json_string()}'''
def lowerCamelCase (self , __magic_name__ = True ) -> str:
'''simple docstring'''
if use_diff is True:
snake_case_ : Tuple = self.to_diff_dict()
else:
snake_case_ : List[Any] = self.to_dict()
return json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + "\n"
def lowerCamelCase (self ) -> Dict[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.to_dict()
# get the default config dict
snake_case_ : Dict = BitsAndBytesConfig().to_dict()
snake_case_ : str = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
snake_case_ : Optional[int] = value
return serializable_config_dict
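# Hedged usage sketch for a config like the one above (model name is illustrative; actually
# loading in 4-bit needs a CUDA device and bitsandbytes>=0.39.0, so this is left commented):
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.float16,
# )
# model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)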
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Any = BioGptTokenizer
lowerCamelCase_ : Optional[Any] = False
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
snake_case_ : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
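        # each merge rule above appears to be "tokenA tokenB count"; e.g. '''l o 123''' merges
        # the pair (l, o) into '''lo''' (the trailing numbers look like toy frequencies)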
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__magic_name__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : str = '''lower newer'''
snake_case_ : Dict = '''lower newer'''
return input_text, output_text
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )
snake_case_ : Union[str, Any] = '''lower'''
snake_case_ : Optional[int] = ['''low''', '''er</w>''']
snake_case_ : Any = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
snake_case_ : Optional[int] = tokens + ['''<unk>''']
snake_case_ : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
@slow
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__magic_name__ )
snake_case_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__magic_name__ )
snake_case_ : str = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : str = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
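# With the lazy structure above, importing this package stays cheap: e.g.
# `from transformers.models.transfo_xl import TransfoXLConfig` only pulls in the heavy
# torch/TF modules once the corresponding attribute is first accessed.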
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
a = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
a = '''The dog is cute and lives in the garden house'''
a = jnp.array([tokenizer.encode(__lowerCamelCase )] )
a = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
a = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
a = model(__lowerCamelCase )['''last_hidden_state''']
self.assertEqual(output.shape ,__lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )