| code (stringlengths 82–53.2k) | code_codestyle (int64 0–721) | style_context (stringlengths 91–41.9k) | style_context_codestyle (int64 0–699) | label (int64 0–1) |
|---|---|---|---|---|

code:
'''simple docstring'''
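# Fine-tuning script for seq2seq models (summarization and translation) built on PyTorch Lightning;
# it defines a SummarizationModule, a TranslationModule subclass, and a main() training entry point.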
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_lowerCamelCase = logging.getLogger(__name__)
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Any ="summarization"
__A : Dict =["loss"]
__A : Dict =ROUGE_KEYS
__A : Union[str, Any] ="rouge2"
def __init__( self ,_snake_case ,**_snake_case ):
if hparams.sortish_sampler and hparams.gpus > 1:
UpperCAmelCase_ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(_snake_case ,num_labels=_snake_case ,mode=self.mode ,**_snake_case )
use_task_specific_params(self.model ,"summarization" )
save_git_info(self.hparams.output_dir )
UpperCAmelCase_ : List[Any] = Path(self.output_dir ) / "metrics.json"
UpperCAmelCase_ : Union[str, Any] = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams ,self.hparams_save_path )
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : int = defaultdict(_snake_case )
UpperCAmelCase_ : str = self.config.model_type
UpperCAmelCase_ : int = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
UpperCAmelCase_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
UpperCAmelCase_ : int = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
UpperCAmelCase_ : Optional[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCAmelCase_ : int = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
UpperCAmelCase_ : Union[str, Any] = get_git_info()["repo_sha"]
UpperCAmelCase_ : Dict = hparams.num_workers
UpperCAmelCase_ : Optional[int] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,_snake_case ):
UpperCAmelCase_ : str = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
UpperCAmelCase_ : Any = self.decoder_start_token_id
UpperCAmelCase_ : Optional[Any] = (
SeqaSeqDataset if hasattr(self.tokenizer ,"prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Optional[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCAmelCase_ : Tuple = self.hparams.eval_max_gen_length
else:
UpperCAmelCase_ : Union[str, Any] = self.model.config.max_length
UpperCAmelCase_ : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[str] = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(_snake_case ,Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / "tok_batch.json" )
UpperCAmelCase_ : Optional[Any] = True
return readable_batch
def UpperCamelCase__ ( self ,_snake_case ,**_snake_case ):
return self.model(_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = self.tokenizer.batch_decode(
_snake_case ,skip_special_tokens=_snake_case ,clean_up_tokenization_spaces=_snake_case )
return lmap(str.strip ,_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = self.tokenizer.pad_token_id
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch["input_ids"], batch["attention_mask"]
UpperCAmelCase_ : Optional[Any] = batch["labels"]
if isinstance(self.model ,_snake_case ):
UpperCAmelCase_ : str = self.model._shift_right(_snake_case )
else:
UpperCAmelCase_ : int = shift_tokens_right(_snake_case ,_snake_case )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCAmelCase_ : Tuple = decoder_input_ids
self.save_readable_batch(_snake_case )
UpperCAmelCase_ : int = self(_snake_case ,attention_mask=_snake_case ,decoder_input_ids=_snake_case ,use_cache=_snake_case )
UpperCAmelCase_ : Optional[int] = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCAmelCase_ : List[Any] = nn.CrossEntropyLoss(ignore_index=_snake_case )
assert lm_logits.shape[-1] == self.vocab_size
UpperCAmelCase_ : List[str] = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) )
else:
UpperCAmelCase_ : Any = nn.functional.log_softmax(_snake_case ,dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = label_smoothed_nll_loss(
_snake_case ,_snake_case ,self.hparams.label_smoothing ,ignore_index=_snake_case )
return (loss,)
@property
def UpperCamelCase__ ( self ):
return self.tokenizer.pad_token_id
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = self._step(_snake_case )
UpperCAmelCase_ : Any = dict(zip(self.loss_names ,_snake_case ) )
# tokens per batch
UpperCAmelCase_ : Optional[int] = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
UpperCAmelCase_ : Tuple = batch["input_ids"].shape[0]
UpperCAmelCase_ : List[Any] = batch["input_ids"].eq(self.pad ).sum()
UpperCAmelCase_ : List[Any] = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
return self._generative_step(_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="val" ):
self.step_count += 1
UpperCAmelCase_ : Optional[int] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCAmelCase_ : Tuple = losses["loss"]
UpperCAmelCase_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
UpperCAmelCase_ : List[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase_ : torch.FloatTensor = torch.tensor(_snake_case ).type_as(_snake_case )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_snake_case )
UpperCAmelCase_ : str = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
UpperCAmelCase_ : Dict = self.step_count
self.metrics[prefix].append(_snake_case ) # callback writes this to self.metrics_save_path
UpperCAmelCase_ : Optional[int] = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
return calculate_rouge(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : int = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCAmelCase_ : Optional[Any] = self.model.generate(
batch["input_ids"] ,attention_mask=batch["attention_mask"] ,use_cache=_snake_case ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
UpperCAmelCase_ : str = (time.time() - ta) / batch["input_ids"].shape[0]
UpperCAmelCase_ : List[str] = self.ids_to_clean_text(_snake_case )
UpperCAmelCase_ : List[str] = self.ids_to_clean_text(batch["labels"] )
UpperCAmelCase_ : Tuple = self._step(_snake_case )
UpperCAmelCase_ : List[Any] = dict(zip(self.loss_names ,_snake_case ) )
UpperCAmelCase_ : Dict = self.calc_generative_metrics(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = np.mean(lmap(_snake_case ,_snake_case ) )
base_metrics.update(gen_time=_snake_case ,gen_len=_snake_case ,preds=_snake_case ,target=_snake_case ,**_snake_case )
return base_metrics
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
return self._generative_step(_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
return self.validation_epoch_end(_snake_case ,prefix="test" )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Any = self.n_obs[type_path]
UpperCAmelCase_ : List[str] = self.target_lens[type_path]
UpperCAmelCase_ : Union[str, Any] = self.dataset_class(
self.tokenizer ,type_path=_snake_case ,n_obs=_snake_case ,max_target_length=_snake_case ,**self.dataset_kwargs ,)
return dataset
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = False ):
UpperCAmelCase_ : Any = self.get_dataset(_snake_case )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCAmelCase_ : int = dataset.make_sortish_sampler(_snake_case ,distributed=self.hparams.gpus > 1 )
return DataLoader(
_snake_case ,batch_size=_snake_case ,collate_fn=dataset.collate_fn ,shuffle=_snake_case ,num_workers=self.num_workers ,sampler=_snake_case ,)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCAmelCase_ : List[Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 )
return DataLoader(
_snake_case ,batch_sampler=_snake_case ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
else:
return DataLoader(
_snake_case ,batch_size=_snake_case ,collate_fn=dataset.collate_fn ,shuffle=_snake_case ,num_workers=self.num_workers ,sampler=_snake_case ,)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.get_dataloader("train" ,batch_size=self.hparams.train_batch_size ,shuffle=_snake_case )
return dataloader
def UpperCamelCase__ ( self ):
return self.get_dataloader("val" ,batch_size=self.hparams.eval_batch_size )
def UpperCamelCase__ ( self ):
return self.get_dataloader("test" ,batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCamelCase__ ( _snake_case ,_snake_case ):
BaseTransformer.add_model_specific_args(_snake_case ,_snake_case )
add_generic_args(_snake_case ,_snake_case )
parser.add_argument(
"--max_source_length" ,default=10_24 ,type=_snake_case ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument(
"--max_target_length" ,default=56 ,type=_snake_case ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument(
"--val_max_target_length" ,default=1_42 ,type=_snake_case ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument(
"--test_max_target_length" ,default=1_42 ,type=_snake_case ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument("--freeze_encoder" ,action="store_true" )
parser.add_argument("--freeze_embeds" ,action="store_true" )
parser.add_argument("--sortish_sampler" ,action="store_true" ,default=_snake_case )
parser.add_argument("--overwrite_output_dir" ,action="store_true" ,default=_snake_case )
parser.add_argument("--max_tokens_per_batch" ,type=_snake_case ,default=_snake_case )
parser.add_argument("--logger_name" ,type=_snake_case ,choices=["default", "wandb", "wandb_shared"] ,default="default" )
parser.add_argument("--n_train" ,type=_snake_case ,default=-1 ,required=_snake_case ,help="# examples. -1 means use all." )
parser.add_argument("--n_val" ,type=_snake_case ,default=5_00 ,required=_snake_case ,help="# examples. -1 means use all." )
parser.add_argument("--n_test" ,type=_snake_case ,default=-1 ,required=_snake_case ,help="# examples. -1 means use all." )
parser.add_argument(
"--task" ,type=_snake_case ,default="summarization" ,required=_snake_case ,help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" ,type=_snake_case ,default=0.0 ,required=_snake_case )
parser.add_argument("--src_lang" ,type=_snake_case ,default="" ,required=_snake_case )
parser.add_argument("--tgt_lang" ,type=_snake_case ,default="" ,required=_snake_case )
parser.add_argument("--eval_beams" ,type=_snake_case ,default=_snake_case ,required=_snake_case )
parser.add_argument(
"--val_metric" ,type=_snake_case ,default=_snake_case ,required=_snake_case ,choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" ,type=_snake_case ,default=_snake_case ,help="never generate more than n tokens" )
parser.add_argument("--save_top_k" ,type=_snake_case ,default=1 ,required=_snake_case ,help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" ,type=_snake_case ,default=-1 ,required=_snake_case ,help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) ,)
return parser
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Any ="translation"
__A : Optional[int] =["loss"]
__A : str =["bleu"]
__A : int ="bleu"
def __init__( self ,_snake_case ,**_snake_case ):
super().__init__(_snake_case ,**_snake_case )
UpperCAmelCase_ : Optional[int] = hparams.src_lang
UpperCAmelCase_ : Optional[Any] = hparams.tgt_lang
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
return calculate_bleu(_snake_case ,_snake_case )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int=None ) -> SummarizationModule:
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
check_output_dir(_SCREAMING_SNAKE_CASE , expected_items=3 )
if model is None:
if "summarization" in args.task:
UpperCAmelCase_ : SummarizationModule = SummarizationModule(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : SummarizationModule = TranslationModule(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
UpperCAmelCase_ : Tuple = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ : str = os.environ.get("WANDB_PROJECT" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = WandbLogger(name=model.output_dir.name , project=_SCREAMING_SNAKE_CASE )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ : List[Any] = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
UpperCAmelCase_ : Optional[Any] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : List[Any] = args.val_metric == "loss"
UpperCAmelCase_ : pl.Trainer = generic_train(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _SCREAMING_SNAKE_CASE ) , early_stopping_callback=_SCREAMING_SNAKE_CASE , logger=_SCREAMING_SNAKE_CASE , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
UpperCAmelCase_ : Tuple = ""
UpperCAmelCase_ : str = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=_SCREAMING_SNAKE_CASE ) )
if checkpoints:
UpperCAmelCase_ : Optional[Any] = checkpoints[-1]
UpperCAmelCase_ : Tuple = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
_lowerCamelCase = pl.Trainer.add_argparse_args(parser)
_lowerCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_lowerCamelCase = parser.parse_args()
main(args)
code_codestyle: 71

style_context:
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
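# Conversion script: downloads an original VideoMAE checkpoint (via gdown / the hub), remaps its state dict
# to the HuggingFace VideoMAE format, verifies logits against known values, and optionally pushes to the hub.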
def a (lowerCAmelCase__ ):
__a = VideoMAEConfig()
set_architecture_configs(lowerCAmelCase__ , lowerCAmelCase__ )
if "finetuned" not in model_name:
__a = False
if "finetuned" in model_name:
__a = """huggingface/label-files"""
if "kinetics" in model_name:
__a = 400
__a = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__a = 174
__a = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__a = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="""dataset""" ) , """r""" ) )
__a = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
if "small" in model_name:
__a = 384
__a = 1_536
__a = 12
__a = 16
__a = 12
__a = 3
__a = 192
__a = 768
elif "large" in model_name:
__a = 1_024
__a = 4_096
__a = 24
__a = 16
__a = 12
__a = 8
__a = 512
__a = 2_048
elif "huge" in model_name:
__a = 1_280
__a = 5_120
__a = 32
__a = 16
__a = 12
__a = 8
__a = 640
__a = 2_560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def a (lowerCAmelCase__ ):
if "encoder." in name:
__a = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__a = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__a = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__a = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__a = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__a = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__a = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__a = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__a = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__a = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__a = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__a = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__a = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__a = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__a = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__a = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__a = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__a = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__a = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__a = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__a = name.replace("""head""" , """classifier""" )
return name
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
for key in orig_state_dict.copy().keys():
__a = orig_state_dict.pop(lowerCAmelCase__ )
if key.startswith("""encoder.""" ):
__a = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__a = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__a = config.decoder_hidden_size
__a = int(key_split[2] )
__a = """decoder.decoder_layers."""
if "weight" in key:
__a = val[:dim, :]
__a = val[dim : dim * 2, :]
__a = val[-dim:, :]
else:
__a = config.hidden_size
__a = int(key_split[1] )
__a = """videomae.encoder.layer."""
if "weight" in key:
__a = val[:dim, :]
__a = val[dim : dim * 2, :]
__a = val[-dim:, :]
else:
__a = val
return orig_state_dict
def a ():
__a = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__a = np.load(lowerCAmelCase__ )
return list(lowerCAmelCase__ )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__a = get_videomae_config(lowerCAmelCase__ )
if "finetuned" in model_name:
__a = VideoMAEForVideoClassification(lowerCAmelCase__ )
else:
__a = VideoMAEForPreTraining(lowerCAmelCase__ )
# download original checkpoint, hosted on Google Drive
__a = """pytorch_model.bin"""
gdown.cached_download(lowerCAmelCase__ , lowerCAmelCase__ , quiet=lowerCAmelCase__ )
__a = torch.load(lowerCAmelCase__ , map_location="""cpu""" )
if "model" in files:
__a = files["""model"""]
else:
__a = files["""module"""]
__a = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
# verify model on basic input
__a = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__a = prepare_video()
__a = image_processor(lowerCAmelCase__ , return_tensors="""pt""" )
if "finetuned" not in model_name:
__a = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__a = torch.load(lowerCAmelCase__ )
__a = model(**lowerCAmelCase__ )
__a = outputs.logits
__a = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__a = torch.Size([1, 400] )
__a = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__a = torch.Size([1, 174] )
__a = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__a = torch.Size([1, 1_408, 1_536] )
__a = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__a = torch.Size([1, 1_408, 1_536] )
__a = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__a = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__a = torch.Size([1, 1_408, 1_536] )
__a = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__a = torch.Size([1, 400] )
__a = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__a = torch.Size([1, 400] )
__a = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__a = torch.Size([1, 400] )
__a = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__a = torch.Size([1, 400] )
__a = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__a = torch.Size([1, 1_408, 1_536] )
__a = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__a = torch.Size([1, 174] )
__a = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__a = torch.Size([1, 1_408, 1_536] )
__a = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__a = torch.Size([1, 174] )
__a = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1E-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__a = outputs.loss
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
style_context_codestyle: 99
label: 0

code:
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
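# Test suite for DDIMParallelScheduler: sweeps scheduler configs, checks variance values,
# and compares full-loop sample sums/means against reference numbers.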
class __lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
snake_case_ = (DDIMParallelScheduler,)
snake_case_ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def lowercase_ ( self , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**lowerCamelCase__ )
return config
def lowercase_ ( self , **lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(**lowerCamelCase__ )
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
__lowerCamelCase = 10, 0.0
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for t in scheduler.timesteps:
__lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(steps_offset=1 )
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def lowercase_ ( self ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCamelCase__ , eta=lowerCamelCase__ )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**lowerCamelCase__ )
__lowerCamelCase = 10, 0.0
scheduler.set_timesteps(lowerCamelCase__ )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter
__lowerCamelCase = self.dummy_sample_deter + 0.1
__lowerCamelCase = self.dummy_sample_deter - 0.1
__lowerCamelCase = samplea.shape[0]
__lowerCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
__lowerCamelCase = torch.arange(lowerCamelCase__ )[0:3, None].repeat(1 , lowerCamelCase__ )
__lowerCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__lowerCamelCase = scheduler.batch_step_no_noise(lowerCamelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowerCamelCase__ )
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.full_loop()
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.full_loop(prediction_type='v_prediction' )
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# We specify different beta, so that the first alpha is 0.99
__lowerCamelCase = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 )
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
# We specify different beta, so that the first alpha is 0.99
__lowerCamelCase = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 )
__lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
__lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
code_codestyle: 719

style_context:
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
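# Configuration class for the MaskFormer Swin backbone (model type "maskformer-swin").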
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
snake_case_ = '''maskformer-swin'''
snake_case_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowerCamelCase__=224 , lowerCamelCase__=4 , lowerCamelCase__=3 , lowerCamelCase__=96 , lowerCamelCase__=[2, 2, 6, 2] , lowerCamelCase__=[3, 6, 12, 24] , lowerCamelCase__=7 , lowerCamelCase__=4.0 , lowerCamelCase__=True , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__="gelu" , lowerCamelCase__=False , lowerCamelCase__=0.02 , lowerCamelCase__=1e-5 , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(lowerCamelCase__ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
__lowerCamelCase = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
__lowerCamelCase , __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
style_context_codestyle: 167
label: 0

code:
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
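# Conversion script: loads a ViT/DeiT checkpoint from timm, renames and splits its weights into the
# HuggingFace ViT format, checks outputs against the timm model, and saves model + image processor.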
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = ''''''
else:
snake_case__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
snake_case__ = dct.pop(__lowerCAmelCase )
snake_case__ = val
def SCREAMING_SNAKE_CASE ( ) -> str:
snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = ViTConfig()
snake_case__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case__ = True
snake_case__ = int(vit_name[-12:-10] )
snake_case__ = int(vit_name[-9:-6] )
else:
snake_case__ = 1000
snake_case__ = '''huggingface/label-files'''
snake_case__ = '''imagenet-1k-id2label.json'''
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = int(vit_name[-6:-4] )
snake_case__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
snake_case__ = 192
snake_case__ = 768
snake_case__ = 12
snake_case__ = 3
elif vit_name[9:].startswith('''small''' ):
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
snake_case__ = 768
snake_case__ = 2304
snake_case__ = 8
snake_case__ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
elif vit_name[4:].startswith('''huge''' ):
snake_case__ = 1280
snake_case__ = 5120
snake_case__ = 32
snake_case__ = 16
# load original model from timm
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ = ViTModel(__lowerCAmelCase ).eval()
else:
snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case__ = DeiTImageProcessor(size=config.image_size )
else:
snake_case__ = ViTImageProcessor(size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ = encoding['''pixel_values''']
snake_case__ = model(__lowerCAmelCase )
if base_model:
snake_case__ = timm_model.forward_features(__lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
snake_case__ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase__ : str = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
code_codestyle: 33

style_context:
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
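# Conversion script: remaps an original Segment Anything (SAM) checkpoint to HuggingFace SamModel
# and verifies predicted IoU scores for point, box, and multi-point prompts.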
SCREAMING_SNAKE_CASE_ : str = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def SCREAMING_SNAKE_CASE ( snake_case ) -> Tuple:
__lowercase = {}
state_dict.pop('pixel_mean' , snake_case )
state_dict.pop('pixel_std' , snake_case )
__lowercase = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__lowercase = key.replace(snake_case , snake_case )
if re.match(snake_case , snake_case ):
__lowercase = int(re.match(snake_case , snake_case ).group(2 ) )
if layer_nb == 0:
__lowercase = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
__lowercase = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
__lowercase = key.replace('layers.2' , 'proj_out' )
__lowercase = value
__lowercase = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , snake_case="ybelkada/segment-anything" ) -> int:
__lowercase = hf_hub_download(snake_case , F"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
__lowercase = SamConfig()
elif "sam_vit_l" in model_name:
__lowercase = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__lowercase = SamConfig(
vision_config=snake_case , )
elif "sam_vit_h" in model_name:
__lowercase = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__lowercase = SamConfig(
vision_config=snake_case , )
__lowercase = torch.load(snake_case , map_location='cpu' )
__lowercase = replace_keys(snake_case )
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(image_processor=snake_case )
__lowercase = SamModel(snake_case )
hf_model.load_state_dict(snake_case )
__lowercase = hf_model.to('cuda' )
__lowercase = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
__lowercase = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert('RGB' )
__lowercase = [[[400, 650]]]
__lowercase = [[1]]
__lowercase = processor(images=np.array(snake_case ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
__lowercase = processor(
images=np.array(snake_case ) , input_points=snake_case , input_labels=snake_case , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
__lowercase = ((75, 275, 1_725, 850),)
__lowercase = processor(images=np.array(snake_case ) , input_boxes=snake_case , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
__lowercase = [[[400, 650], [800, 650]]]
__lowercase = [[1, 1]]
__lowercase = processor(
images=np.array(snake_case ) , input_points=snake_case , input_labels=snake_case , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[Any] = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
style_context_codestyle: 375
label: 0

code:
import math
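# Computes the square root of a number via the Newton-Raphson method, starting from a rough initial guess.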
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
return math.pow(lowerCAmelCase_ , 2) - a
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 2 * x
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : int = 2.0
while start <= a:
lowerCamelCase_ : List[Any] = math.pow(lowerCAmelCase_ , 2)
return start
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ = 9999 , lowerCAmelCase_ = 0.00_00_00_00_00_00_01):
'''simple docstring'''
if a < 0:
raise ValueError("math domain error")
lowerCamelCase_ : List[str] = get_initial_point(lowerCAmelCase_)
for _ in range(lowerCAmelCase_):
lowerCamelCase_ : Tuple = value
lowerCamelCase_ : Optional[int] = value - fx(lowerCAmelCase_ , lowerCAmelCase_) / fx_derivative(lowerCAmelCase_)
if abs(prev_value - value) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
code_codestyle: 73

style_context:
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
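# Configuration class for CvT (Convolutional vision Transformer) models, model type "cvt".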
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = '''cvt'''
def __init__( self , a_=3 , a_=[7, 3, 3] , a_=[4, 2, 2] , a_=[2, 1, 1] , a_=[64, 192, 384] , a_=[1, 3, 6] , a_=[1, 2, 10] , a_=[4.0, 4.0, 4.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.1] , a_=[True, True, True] , a_=[False, False, True] , a_=["dw_bn", "dw_bn", "dw_bn"] , a_=[3, 3, 3] , a_=[1, 1, 1] , a_=[2, 2, 2] , a_=[1, 1, 1] , a_=[1, 1, 1] , a_=0.02 , a_=1E-12 , **a_ , ):
super().__init__(**a_ )
lowerCamelCase_ : Optional[Any] = num_channels
lowerCamelCase_ : str = patch_sizes
lowerCamelCase_ : List[Any] = patch_stride
lowerCamelCase_ : str = patch_padding
lowerCamelCase_ : str = embed_dim
lowerCamelCase_ : Union[str, Any] = num_heads
lowerCamelCase_ : Optional[Any] = depth
lowerCamelCase_ : int = mlp_ratio
lowerCamelCase_ : Union[str, Any] = attention_drop_rate
lowerCamelCase_ : Optional[Any] = drop_rate
lowerCamelCase_ : Optional[int] = drop_path_rate
lowerCamelCase_ : Union[str, Any] = qkv_bias
lowerCamelCase_ : int = cls_token
lowerCamelCase_ : int = qkv_projection_method
lowerCamelCase_ : int = kernel_qkv
lowerCamelCase_ : Optional[Any] = padding_kv
lowerCamelCase_ : Optional[int] = stride_kv
lowerCamelCase_ : Optional[int] = padding_q
lowerCamelCase_ : List[Any] = stride_q
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : int = layer_norm_eps
style_context_codestyle: 73
label: 1

code:
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
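# Shared test mixin for DeepFloyd IF pipelines: builds tiny dummy components (T5 encoder, UNet, DDPM
# schedulers, watermarker) and checks that pipelines save/load correctly with optional components set to None.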
class A_ :
def _lowercase ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Dict = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
_lowerCamelCase : str = UNetaDConditionModel(
sample_size=32 ,layers_per_block=1 ,block_out_channels=[32, 64] ,down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] ,mid_block_type="UNetMidBlock2DSimpleCrossAttn" ,up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] ,in_channels=3 ,out_channels=6 ,cross_attention_dim=32 ,encoder_hid_dim=32 ,attention_head_dim=8 ,addition_embed_type="text" ,addition_embed_type_num_heads=2 ,cross_attention_norm="group_norm" ,resnet_time_scale_shift="scale_shift" ,act_fn="gelu" ,)
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1_000 ,beta_schedule="squaredcos_cap_v2" ,beta_start=0.00_01 ,beta_end=0.02 ,thresholding=__lowerCAmelCase ,dynamic_thresholding_ratio=0.95 ,sample_max_value=1.0 ,prediction_type="epsilon" ,variance_type="learned_range" ,)
torch.manual_seed(0 )
_lowerCamelCase : Tuple = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Dict = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
_lowerCamelCase : Any = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = UNetaDConditionModel(
sample_size=32 ,layers_per_block=[1, 2] ,block_out_channels=[32, 64] ,down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] ,mid_block_type="UNetMidBlock2DSimpleCrossAttn" ,up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] ,in_channels=6 ,out_channels=6 ,cross_attention_dim=32 ,encoder_hid_dim=32 ,attention_head_dim=8 ,addition_embed_type="text" ,addition_embed_type_num_heads=2 ,cross_attention_norm="group_norm" ,resnet_time_scale_shift="scale_shift" ,act_fn="gelu" ,class_embed_type="timestep" ,mid_block_scale_factor=1.4_14 ,time_embedding_act_fn="gelu" ,time_embedding_dim=32 ,)
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_lowerCamelCase : List[str] = DDPMScheduler(
num_train_timesteps=1_000 ,beta_schedule="squaredcos_cap_v2" ,beta_start=0.00_01 ,beta_end=0.02 ,thresholding=__lowerCAmelCase ,dynamic_thresholding_ratio=0.95 ,sample_max_value=1.0 ,prediction_type="epsilon" ,variance_type="learned_range" ,)
torch.manual_seed(0 )
_lowerCamelCase : Any = DDPMScheduler(
num_train_timesteps=1_000 ,beta_schedule="squaredcos_cap_v2" ,beta_start=0.00_01 ,beta_end=0.02 ,)
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.get_dummy_components()
_lowerCamelCase : Union[str, Any] = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs["prompt"]
_lowerCamelCase : List[Any] = inputs["generator"]
_lowerCamelCase : str = inputs["num_inference_steps"]
_lowerCamelCase : Optional[Any] = inputs["output_type"]
if "image" in inputs:
_lowerCamelCase : Optional[Any] = inputs["image"]
else:
_lowerCamelCase : Optional[Any] = None
if "mask_image" in inputs:
_lowerCamelCase : Dict = inputs["mask_image"]
else:
_lowerCamelCase : Optional[Any] = None
if "original_image" in inputs:
_lowerCamelCase : Tuple = inputs["original_image"]
else:
_lowerCamelCase : Dict = None
_lowerCamelCase, _lowerCamelCase : int = pipe.encode_prompt(__lowerCAmelCase )
# inputs with prompt converted to embeddings
_lowerCamelCase : str = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
_lowerCamelCase : Union[str, Any] = image
if mask_image is not None:
_lowerCamelCase : Union[str, Any] = mask_image
if original_image is not None:
_lowerCamelCase : Tuple = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCAmelCase ,__lowerCAmelCase ) is None ,F"""`{optional_component}` did not stay set to None after loading.""" ,)
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Tuple = inputs["generator"]
_lowerCamelCase : Optional[int] = inputs["num_inference_steps"]
_lowerCamelCase : List[Any] = inputs["output_type"]
# inputs with prompt converted to embeddings
_lowerCamelCase : str = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
_lowerCamelCase : Tuple = image
if mask_image is not None:
_lowerCamelCase : Dict = mask_image
if original_image is not None:
_lowerCamelCase : List[str] = original_image
_lowerCamelCase : List[Any] = pipe_loaded(**__lowerCAmelCase )[0]
_lowerCamelCase : str = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
self.assertLess(__lowerCAmelCase ,1e-4 )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_dummy_components()
_lowerCamelCase : Optional[Any] = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : List[str] = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
_lowerCamelCase : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Dict = pipe_loaded(**__lowerCAmelCase )[0]
_lowerCamelCase : Dict = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
self.assertLess(__lowerCAmelCase ,1e-4 )
| 46
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n    - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n    - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n    - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n    - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,)
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        '''simple docstring'''
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 65
| 0
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path, map_location="""cpu""")
    args = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    state_dict = mam_aaa["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 711
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 378
| 0
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
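# Helpers below: environment-flag parsing, unittest skip decorators for optional backends/hardware, and async subprocess runners used by the accelerate test suite.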
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def UpperCamelCase_( _A :Union[str, Any] )-> str:
return unittest.skip("Test was skipped" )(_A )
def UpperCamelCase_( _A :Tuple )-> Dict:
return unittest.skipUnless(_run_slow_tests , "test is slow" )(_A )
def UpperCamelCase_( _A :Optional[Any] )-> Dict:
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(_A )
def UpperCamelCase_( _A :List[str] )-> str:
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(_A )
def UpperCamelCase_( _A :Union[str, Any] )-> Tuple:
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(_A )
def UpperCamelCase_( _A :Optional[int] )-> Union[str, Any]:
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(_A )
def UpperCamelCase_( _A :List[str] )-> Dict:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(_A )
def UpperCamelCase_( _A :Any )-> Tuple:
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(_A )
def UpperCamelCase_( _A :Tuple )-> str:
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(_A )
def UpperCamelCase_( _A :List[str] )-> Dict:
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(_A )
def UpperCamelCase_( _A :List[str] )-> Optional[Any]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(_A )
def UpperCamelCase_( _A :Dict )-> Optional[Any]:
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(_A )
def UpperCamelCase_( _A :Tuple )-> Dict:
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(_A )
def UpperCamelCase_( _A :Optional[int] )-> str:
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(_A )
def UpperCamelCase_( _A :List[Any] )-> Optional[int]:
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(_A )
def UpperCamelCase_( _A :int )-> Union[str, Any]:
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(_A )
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=" , version ) , F'''test requires torch version >= {version}''' )(test_case)
def UpperCamelCase_( _A :Optional[int] )-> Any:
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(_A )
def UpperCamelCase_( _A :Dict )-> Tuple:
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(_A )
def UpperCamelCase_( _A :Union[str, Any] )-> Union[str, Any]:
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(_A )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase_( _A :Dict )-> Union[str, Any]:
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_A )
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    clear_on_setup = True
@classmethod
def snake_case__ ( cls ):
'''simple docstring'''
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
def snake_case__ ( cls ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def snake_case__ ( self ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(snake_case )
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self , snake_case ):
'''simple docstring'''
        self.mocks = snake_case if isinstance(snake_case , (tuple, list) ) else [snake_case]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    """simple docstring"""
    def __init__( self , returncode , stdout , stderr ):
        '''simple docstring'''
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label="stdout:" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label="stderr:" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class SubprocessCallException(Exception):
"""simple docstring"""
pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , "decode" ):
                output = output.decode("utf-8" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F'''Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
| 551
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase__ = dict(zip(snake_case , range(len(snake_case ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case ) )
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCamelCase__ = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , max_length=len(snake_case ) , padding=snake_case , return_tensors="pt" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
# Test that special tokens are reset
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , padding=snake_case , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , snake_case )
self.assertIn("attention_mask" , snake_case )
self.assertNotIn("labels" , snake_case )
self.assertNotIn("decoder_attention_mask" , snake_case )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(text_target=snake_case , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=snake_case , truncation=snake_case , return_tensors="pt" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization."]
UpperCamelCase__ = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , text_target=snake_case , return_tensors="pt" )
UpperCamelCase__ = inputs["input_ids"]
UpperCamelCase__ = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase__ = "A, <mask> AllenNLP sentence."
UpperCamelCase__ = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
UpperCamelCase__ = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 551
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def UpperCamelCase__ ( _A: int ):
'''simple docstring'''
__lowerCamelCase = {}
with open(_A , """r""" ) as file:
for line_number, line in enumerate(_A ):
__lowerCamelCase = line.strip()
if line:
__lowerCamelCase = line.split()
__lowerCamelCase = line_number
__lowerCamelCase = words[0]
__lowerCamelCase = value
return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
'''simple docstring'''
for attribute in key.split(""".""" ):
__lowerCamelCase = getattr(_A , _A )
__lowerCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_A ):
__lowerCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__lowerCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__lowerCamelCase = getattr(_A , _A ).shape
elif weight_type is not None and weight_type == "param":
__lowerCamelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__lowerCamelCase = getattr(_A , _A )
__lowerCamelCase = shape_pointer.shape
# let's reduce dimension
__lowerCamelCase = value[0]
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__lowerCamelCase = getattr(_A , _A )
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def UpperCamelCase__ ( _A: Any , _A: int , _A: Dict , _A: Tuple , _A: Any ):
'''simple docstring'''
__lowerCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_A ):
__lowerCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__lowerCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__lowerCamelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__lowerCamelCase = """.""".join([key, hf_param_name] )
else:
__lowerCamelCase = key
__lowerCamelCase = value if """lm_head""" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
'''simple docstring'''
__lowerCamelCase = False
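    # try each fairseq -> HF name mapping; the first key that matches this fairseq parameter decides where it gets loaded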
for key, mapped_key in MAPPING.items():
__lowerCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(_A )[0].split(""".""" )[-2]
__lowerCamelCase = mapped_key.replace("""*""" , _A )
if "weight_g" in name:
__lowerCamelCase = """weight_g"""
elif "weight_v" in name:
__lowerCamelCase = """weight_v"""
elif "bias" in name:
__lowerCamelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = """weight"""
else:
__lowerCamelCase = None
if hf_dict is not None:
rename_dict(_A , _A , _A , _A , _A )
else:
set_recursively(_A , _A , _A , _A , _A )
return is_used
return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == """group""" , )
__lowerCamelCase = True
else:
__lowerCamelCase = load_wavaveca_layer(_A , _A , _A )
if not is_used:
unused_weights.append(_A )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
'''simple docstring'''
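    # conv params are named "conv_layers.<layer_id>.<type_id>....": type_id 0 is the conv weight/bias, type_id 2 its (group/layer) norm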
__lowerCamelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCamelCase = name.split(""".""" )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__lowerCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__lowerCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__lowerCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__lowerCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_A )
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
'''simple docstring'''
if config_path is not None:
__lowerCamelCase = WavaVecaConfig.from_pretrained(_A )
else:
__lowerCamelCase = WavaVecaConfig()
if is_seq_class:
__lowerCamelCase = read_txt_into_dict(_A )
__lowerCamelCase = idalabel
__lowerCamelCase = WavaVecaForSequenceClassification(_A )
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
feature_extractor.save_pretrained(_A )
elif is_finetuned:
if dict_path:
__lowerCamelCase = Dictionary.load(_A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCamelCase = target_dict.pad_index
__lowerCamelCase = target_dict.bos_index
__lowerCamelCase = target_dict.eos_index
__lowerCamelCase = len(target_dict.symbols )
__lowerCamelCase = os.path.join(_A , """vocab.json""" )
if not os.path.isdir(_A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_A ) )
return
os.makedirs(_A , exist_ok=_A )
__lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCamelCase = 0
__lowerCamelCase = 1
with open(_A , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_A , _A )
__lowerCamelCase = WavaVecaCTCTokenizer(
_A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_A , )
__lowerCamelCase = True if config.feat_extract_norm == """layer""" else False
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
__lowerCamelCase = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A )
processor.save_pretrained(_A )
__lowerCamelCase = WavaVecaForCTC(_A )
else:
__lowerCamelCase = WavaVecaForPreTraining(_A )
if is_finetuned or is_seq_class:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__lowerCamelCase = argparse.Namespace(task="""audio_pretraining""" )
__lowerCamelCase = fairseq.tasks.setup_task(_A )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_A )
__lowerCamelCase = model[0].eval()
recursively_load_weights(_A , _A , not is_finetuned )
hf_wavavec.save_pretrained(_A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 571
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_a : str = logging.getLogger(__name__)
_a : Optional[int] = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def UpperCamelCase__ ( _A: int , _A: List[str] ):
'''simple docstring'''
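    # build the authors' original model and the HF re-implementation with identical hyper-parameters so their outputs can be compared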
__lowerCamelCase = BertAbsConfig(
temp_dir=""".""" , finetune_bert=_A , large=_A , share_emb=_A , use_bert_emb=_A , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__lowerCamelCase = torch.load(_A , lambda _A , _A : storage )
__lowerCamelCase = AbsSummarizer(_A , torch.device("""cpu""" ) , _A )
original.eval()
__lowerCamelCase = BertAbsSummarizer(_A , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
__lowerCamelCase = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_A )) )
__lowerCamelCase = torch.tensor(_A ).unsqueeze(0 )
__lowerCamelCase = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_A )) )
__lowerCamelCase = torch.tensor(_A ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__lowerCamelCase = encoder_input_ids
__lowerCamelCase = decoder_input_ids
__lowerCamelCase = __lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = __lowerCamelCase = None
__lowerCamelCase = __lowerCamelCase = None
__lowerCamelCase = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__lowerCamelCase = original(_A , _A , _A , _A , _A , _A , _A )[0]
__lowerCamelCase = original.generator(_A )
__lowerCamelCase = new_model(
_A , _A , _A , _A , _A )[0]
__lowerCamelCase = new_model.generator(_A )
__lowerCamelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(_A ) )
__lowerCamelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(_A ) )
__lowerCamelCase = torch.allclose(_A , _A , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 571
| 1
|
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """simple docstring"""
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 602
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_herbert_fast'''] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 156
|
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 156
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester :
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Any=20 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : Tuple=16 , ):
lowerCAmelCase : Dict = parent
lowerCAmelCase : List[str] = batch_size
lowerCAmelCase : Tuple = seq_length
lowerCAmelCase : int = is_training
lowerCAmelCase : Optional[int] = use_labels
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : Optional[int] = num_attention_heads
lowerCAmelCase : Any = intermediate_size
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : str = eos_token_id
lowerCAmelCase : int = pad_token_id
lowerCAmelCase : Union[str, Any] = bos_token_id
lowerCAmelCase : Dict = embed_dim
lowerCAmelCase : Tuple = word_embed_proj_dim
lowerCAmelCase : Any = False
def lowercase__ ( self : Dict ):
lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase : Any = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__a , **self.config_updates , )
lowerCAmelCase : int = prepare_opt_inputs_dict(__a , __a )
return config, inputs_dict
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : Optional[int] = TFOPTModel(config=__a )
lowerCAmelCase : Dict = inputs_dict['input_ids']
lowerCAmelCase : List[Any] = input_ids[:1, :]
lowerCAmelCase : Optional[int] = inputs_dict['attention_mask'][:1, :]
lowerCAmelCase : Any = 1
# first forward pass
lowerCAmelCase : int = model(__a , attention_mask=__a , use_cache=__a )
lowerCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase : Dict = model(__a , attention_mask=__a )[0]
lowerCAmelCase : str = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1E-3 )
@require_tf
class __A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = TFOPTModelTester(self )
lowerCAmelCase : List[Any] = ConfigTester(self , config_class=__a )
def lowercase__ ( self : Any ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def lowercase__ ( self : Optional[Any] ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

def _get_word_embedding_weight(model, embedding_layer):
    if hasattr(embedding_layer, 'weight' ):
        return embedding_layer.weight
    else:
        # Here we build the word embeddings weights if not exists.
        # And then we retry to get the attribute once built.
        model.build()
        if hasattr(embedding_layer, 'weight' ):
            return embedding_layer.weight
        else:
            return None

for model_class in self.all_model_classes:
    for size in [config.vocab_size - 10, config.vocab_size + 10]:
        # build the embeddings
        model = model_class(config=config )
        old_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
        old_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
        # reshape the embeddings
        model.resize_token_embeddings(size )
        new_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
        new_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
        # check that the resized embeddings size matches the desired size.
        assert_size = size if size is not None else config.vocab_size
        self.assertEqual(new_input_embeddings.shape[0] , assert_size )
        # check that weights remain the same after resizing
        models_equal = True
        for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
            if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                models_equal = False
        self.assertTrue(models_equal )
        if old_output_embeddings is not None and new_output_embeddings is not None:
            self.assertEqual(new_output_embeddings.shape[0] , assert_size )
            models_equal = True
            for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                    models_equal = False
            self.assertTrue(models_equal )
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
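# The head tests below use a tiny, randomly initialised OPT configuration (vocab_size=99, 2 layers)
# so they run quickly without downloading a pretrained checkpoint.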
@require_tf
class __A ( unittest.TestCase ):
vocab_size = 99
def lowercase__ ( self : List[str] ):
eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
batch_size = input_ids.shape[0]
config = OPTConfig(
    vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
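# Slow integration test: runs the pretrained facebook/opt-350m checkpoint and compares the last
# hidden state (eager and XLA-compiled) against reference values.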
@require_sentencepiece
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : List[Any] ):
model = TFOPTModel.from_pretrained('facebook/opt-350m' )
input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
attention_mask = tf.not_equal(input_ids , model.config.pad_token_id )
with tf.GradientTape():
    output = model(input_ids=input_ids , attention_mask=attention_mask ).last_hidden_state
expected_shape = (1, 11, 512)
self.assertEqual(output.shape , expected_shape )
expected_slice = tf.constant(
    [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4E-3 ) )
xla_generate = tf.function(model , jit_compile=True )
output = xla_generate(input_ids , attention_mask )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4E-2 ) )
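# Embedding/logits parity test: mean logits from facebook/opt-350m are compared against reference
# values computed with Metaseq, both with and without XLA compilation.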
@require_tf
@slow
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
super().setUp()
self.path_model = 'facebook/opt-350m'
def lowercase__ ( self : Optional[Any] ):
model = TFOPTForCausalLM.from_pretrained(self.path_model )
tokenizer = GPTaTokenizer.from_pretrained(self.path_model )
prompts = [
    'Today is a beautiful day and I want to',
    'In the city of',
    'Paris is the capital of France and',
    'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
inputs = tokenizer(prompts , return_tensors='tf' , padding=True , add_special_tokens=False )
logits = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
logits_meta = tf.constant(
    [
        [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
        [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
        [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
        [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
    ] )
self.assertTrue(np.allclose(logits , logits_meta , atol=1E-4 ) )
xla_generate = tf.function(model , jit_compile=True )
logits = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(logits , logits_meta , atol=1E-4 ) )
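# Generation integration tests: greedy generation on fixed prompts for opt-125m and opt-350m,
# plus a batched-generation check that relies on left padding.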
@require_tf
@slow
class __A ( unittest.TestCase ):
@property
def lowercase__ ( self : int ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase__ ( self : List[str] ):
lowerCAmelCase : List[str] = 'facebook/opt-125m'
lowerCAmelCase : Dict = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase : Tuple = []
lowerCAmelCase : str = GPTaTokenizer.from_pretrained(__a )
lowerCAmelCase : Dict = TFOPTForCausalLM.from_pretrained(__a )
for prompt in self.prompts:
lowerCAmelCase : Dict = tokenizer(__a , return_tensors='tf' ).input_ids
lowerCAmelCase : int = model.generate(__a , max_length=10 )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a , skip_special_tokens=__a )
predicted_outputs += generated_string
self.assertListEqual(__a , __a )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = 'facebook/opt-350m'
lowerCAmelCase : List[Any] = GPTaTokenizer.from_pretrained(__a )
lowerCAmelCase : str = TFOPTForCausalLM.from_pretrained(__a )
lowerCAmelCase : Optional[int] = 'left'
# use different length sentences to test batching
lowerCAmelCase : List[Any] = [
'Hello, my dog is a little',
'Today, I',
]
lowerCAmelCase : Optional[int] = tokenizer(__a , return_tensors='tf' , padding=__a )
lowerCAmelCase : Tuple = inputs['input_ids']
lowerCAmelCase : Optional[Any] = model.generate(input_ids=__a , attention_mask=inputs['attention_mask'] )
lowerCAmelCase : Any = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCAmelCase : Optional[Any] = model.generate(input_ids=__a )
lowerCAmelCase : List[Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
lowerCAmelCase : Union[str, Any] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCAmelCase : Any = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings )
lowerCAmelCase : Optional[int] = tokenizer.batch_decode(__a , skip_special_tokens=__a )
lowerCAmelCase : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
lowerCAmelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
lowerCAmelCase : Any = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
def lowercase__ ( self : Any ):
lowerCAmelCase : Any = 'facebook/opt-350m'
lowerCAmelCase : str = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase : int = []
lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained(__a )
lowerCAmelCase : List[Any] = TFOPTForCausalLM.from_pretrained(__a )
for prompt in self.prompts:
lowerCAmelCase : Optional[Any] = tokenizer(__a , return_tensors='tf' ).input_ids
lowerCAmelCase : List[Any] = model.generate(__a , max_length=10 )
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(__a , skip_special_tokens=__a )
predicted_outputs += generated_string
self.assertListEqual(__a , __a )
| 343
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ : Union[str, Any] = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = ['''OwlViTFeatureExtractor''']
a_ : Tuple = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 594
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
__a = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
__a = {
"""allenai/longformer-base-4096""": 40_96,
"""allenai/longformer-large-4096""": 40_96,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 40_96,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 40_96,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
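# Byte-level BPE tokenizer (GPT-2/RoBERTa style) for Longformer: loads vocab.json and merges.txt,
# maps raw bytes to printable unicode via bytes_to_unicode, and caches BPE splits per token.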
class A ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int]="replace" , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : List[Any]="</s>" , __UpperCAmelCase : Union[str, Any]="</s>" , __UpperCAmelCase : str="<s>" , __UpperCAmelCase : List[str]="<unk>" , __UpperCAmelCase : List[Any]="<pad>" , __UpperCAmelCase : int="<mask>" , __UpperCAmelCase : str=False , **__UpperCAmelCase : Dict , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
with open(__UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
UpperCamelCase_ = json.load(__UpperCAmelCase )
UpperCamelCase_ = {v: k for k, v in self.encoder.items()}
UpperCamelCase_ = errors # how to handle errors in decoding
UpperCamelCase_ = bytes_to_unicode()
UpperCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCAmelCase , encoding='utf-8' ) as merges_handle:
UpperCamelCase_ = merges_handle.read().split('\n' )[1:-1]
UpperCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase_ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
UpperCamelCase_ = {}
UpperCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase_ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return len(self.encoder )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : int , __UpperCAmelCase : Dict ) -> Any:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase_ = tuple(__UpperCAmelCase )
UpperCamelCase_ = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
UpperCamelCase_ = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase_ , UpperCamelCase_ = bigram
UpperCamelCase_ = []
UpperCamelCase_ = 0
while i < len(__UpperCAmelCase ):
try:
UpperCamelCase_ = word.index(__UpperCAmelCase , __UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase_ = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase_ = tuple(__UpperCAmelCase )
UpperCamelCase_ = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
UpperCamelCase_ = get_pairs(__UpperCAmelCase )
UpperCamelCase_ = ' '.join(__UpperCAmelCase )
UpperCamelCase_ = word
return word
def lowercase__ ( self : int , __UpperCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = []
for token in re.findall(self.pat , __UpperCAmelCase ):
UpperCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase ).split(' ' ) )
return bpe_tokens
def lowercase__ ( self : List[str] , __UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : int , __UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(__UpperCAmelCase )
def lowercase__ ( self : Dict , __UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = ''.join(__UpperCAmelCase )
UpperCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowercase__ ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase_ = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + '\n' )
UpperCamelCase_ = 0
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
UpperCamelCase_ = token_index
writer.write(' '.join(__UpperCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def lowercase__ ( self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
UpperCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : Union[str, Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowercase__ ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Any=False , **__UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
UpperCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase ) > 0 and not text[0].isspace()):
UpperCamelCase_ = ' ' + text
return (text, kwargs)
| 714
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
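# Fast pipeline tests for DeepFloyd IF using dummy components, followed by slow GPU integration
# tests that run the full text-to-image, img2img and inpainting cascades with CPU offload enabled.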
@skip_mps
class A ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = IFPipeline
_SCREAMING_SNAKE_CASE : Any = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
_SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
_SCREAMING_SNAKE_CASE : Optional[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def lowercase__ ( self : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any=0 ) -> Tuple:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('mps' ):
UpperCamelCase_ = torch.manual_seed(__UpperCAmelCase )
else:
UpperCamelCase_ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
UpperCamelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
self._test_save_load_local()
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
UpperCamelCase_ = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
UpperCamelCase_ , UpperCamelCase_ = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase_ = None
UpperCamelCase_ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase_ = IFImgaImgPipeline(**pipe_a.components )
UpperCamelCase_ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase_ = IFInpaintingPipeline(**pipe_a.components )
UpperCamelCase_ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
_start_torch_memory_measurement()
UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase_ = pipe_a(
prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , num_inference_steps=2 , generator=__UpperCAmelCase , output_type='np' , )
UpperCamelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__UpperCAmelCase )
UpperCamelCase_ = pipe_a(
prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
UpperCamelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
def lowercase__ ( self : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ) -> int:
"""simple docstring"""
_start_torch_memory_measurement()
UpperCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__UpperCAmelCase )
UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase_ = pipe_a(
prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=2 , generator=__UpperCAmelCase , output_type='np' , )
UpperCamelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__UpperCAmelCase )
UpperCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__UpperCAmelCase )
UpperCamelCase_ = pipe_a(
prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , image=__UpperCAmelCase , original_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
UpperCamelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
def lowercase__ ( self : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_start_torch_memory_measurement()
UpperCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__UpperCAmelCase )
UpperCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__UpperCAmelCase )
UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase_ = pipe_a(
prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , num_inference_steps=2 , generator=__UpperCAmelCase , output_type='np' , )
UpperCamelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__UpperCAmelCase )
UpperCamelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__UpperCAmelCase )
UpperCamelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__UpperCAmelCase )
UpperCamelCase_ = pipe_a(
prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , original_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
UpperCamelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
def a_ ( ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 559
| 0
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
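# DPMSolverSDEScheduler tests: sweep the common config options, then run short denoising loops and
# compare the summed and mean absolute outputs against per-device reference values.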
@require_torchsde
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = (DPMSolverSDEScheduler,)
lowerCamelCase_ = 10
def lowerCamelCase_ ( self : List[Any] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] ={
'''num_train_timesteps''': 1100,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**UpperCAmelCase__ )
return config
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =self.scheduler_classes[0]
lowercase : Dict =self.get_scheduler_config()
lowercase : Tuple =scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase : int =self.dummy_model()
lowercase : Dict =self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase : List[str] =sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase : List[Any] =scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : List[str] =model(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Tuple =output.prev_sample
lowercase : int =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowercase : Optional[Any] =torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1E-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =self.scheduler_classes[0]
lowercase : str =self.get_scheduler_config(prediction_type='''v_prediction''' )
lowercase : Tuple =scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase : Optional[int] =self.dummy_model()
lowercase : str =self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase : str =sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase : Optional[int] =scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Any =model(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : List[str] =scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =output.prev_sample
lowercase : Any =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowercase : List[str] =torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1E-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1E-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1E-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1E-3
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[int] =self.scheduler_classes[0]
lowercase : List[str] =self.get_scheduler_config()
lowercase : List[Any] =scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
lowercase : List[Any] =self.dummy_model()
lowercase : Union[str, Any] =self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowercase : List[Any] =scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =model(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Tuple =scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : List[str] =output.prev_sample
lowercase : Any =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowercase : Tuple =torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1E-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Tuple =self.scheduler_classes[0]
lowercase : Optional[Any] =self.get_scheduler_config()
lowercase : Union[str, Any] =scheduler_class(**UpperCAmelCase__ , use_karras_sigmas=UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
lowercase : Union[str, Any] =self.dummy_model()
lowercase : str =self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma
lowercase : List[str] =sample.to(UpperCAmelCase__ )
for t in scheduler.timesteps:
lowercase : int =scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =model(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Dict =scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =output.prev_sample
lowercase : Any =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowercase : List[Any] =torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
| 92
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
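# DDPMParallelScheduler tests: config sweeps, variance checks, batched no-noise stepping,
# full denoising loops, and validation of custom timestep schedules.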
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Optional[int] = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
a_ : Optional[int] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : List[Any] = self.scheduler_classes[0]
a_ : Optional[Any] = self.get_scheduler_config()
a_ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
a_ : int = self.scheduler_classes[0]
a_ : List[Any] = self.get_scheduler_config()
a_ : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
a_ : List[str] = len(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = self.dummy_model()
a_ : str = self.dummy_sample_deter
a_ : int = self.dummy_sample_deter + 0.1
a_ : List[str] = self.dummy_sample_deter - 0.1
a_ : Dict = samplea.shape[0]
a_ : Any = torch.stack([samplea, samplea, samplea] , dim=0 )
a_ : Optional[int] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
a_ : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
a_ : str = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
a_ : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
a_ : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
a_ : Any = self.scheduler_classes[0]
a_ : List[str] = self.get_scheduler_config()
a_ : int = scheduler_class(**SCREAMING_SNAKE_CASE__ )
a_ : Dict = len(SCREAMING_SNAKE_CASE__ )
a_ : int = self.dummy_model()
a_ : Optional[Any] = self.dummy_sample_deter
a_ : Any = torch.manual_seed(0 )
for t in reversed(range(SCREAMING_SNAKE_CASE__ ) ):
# 1. predict noise residual
a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# 2. predict previous mean of sample x_t-1
a_ : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
a_ : Union[str, Any] = pred_prev_sample
a_ : List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
a_ : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
a_ : Any = self.scheduler_classes[0]
a_ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
a_ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__ )
a_ : Tuple = len(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = self.dummy_model()
a_ : Optional[Any] = self.dummy_sample_deter
a_ : str = torch.manual_seed(0 )
for t in reversed(range(SCREAMING_SNAKE_CASE__ ) ):
# 1. predict noise residual
a_ : Any = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# 2. predict previous mean of sample x_t-1
a_ : int = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
a_ : List[str] = pred_prev_sample
a_ : Optional[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
a_ : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
a_ : Optional[int] = self.scheduler_classes[0]
a_ : int = self.get_scheduler_config()
a_ : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
a_ : int = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE__ ):
if i == len(SCREAMING_SNAKE_CASE__ ) - 1:
a_ : Any = -1
else:
a_ : Optional[int] = timesteps[i + 1]
a_ : Optional[Any] = scheduler.previous_timestep(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
a_ : Tuple = self.scheduler_classes[0]
a_ : List[Any] = self.get_scheduler_config()
a_ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
a_ : str = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE__ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
a_ : List[str] = self.scheduler_classes[0]
a_ : Dict = self.get_scheduler_config()
a_ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = [1_0_0, 8_7, 5_0, 1, 0]
a_ : Optional[Any] = len(SCREAMING_SNAKE_CASE__ )
with self.assertRaises(SCREAMING_SNAKE_CASE__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE__ , timesteps=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
a_ : int = self.scheduler_classes[0]
a_ : str = self.get_scheduler_config()
a_ : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
a_ : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE__ )
| 570
| 0
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
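# ViTMAE model tests. The random patch masking makes each forward pass nondeterministic, which is
# why several common tests are skipped and save/load determinism is checked with a fixed seed instead.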
class __lowercase :
def __init__( self : List[Any] , lowercase__ : Dict , lowercase__ : str=1_3 , lowercase__ : str=3_0 , lowercase__ : List[Any]=2 , lowercase__ : Any=3 , lowercase__ : Optional[Any]=True , lowercase__ : str=True , lowercase__ : Any=3_2 , lowercase__ : List[Any]=5 , lowercase__ : int=4 , lowercase__ : Optional[Any]=3_7 , lowercase__ : List[Any]="gelu" , lowercase__ : Tuple=0.1 , lowercase__ : List[str]=0.1 , lowercase__ : List[str]=1_0 , lowercase__ : Optional[int]=0.02 , lowercase__ : Union[str, Any]=3 , lowercase__ : Dict=0.6 , lowercase__ : List[Any]=None , ):
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = is_training
a_ = use_labels
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = type_sequence_label_size
a_ = initializer_range
a_ = mask_ratio
a_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
a_ = (image_size // patch_size) ** 2
a_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __magic_name__ ( self : int ):
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __magic_name__ ( self : Tuple , lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : Optional[int] ):
a_ = ViTMAEModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple , lowercase__ : Any , lowercase__ : Tuple , lowercase__ : Union[str, Any] ):
a_ = ViTMAEForPreTraining(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ )
a_ = (self.image_size // self.patch_size) ** 2
a_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
a_ = 1
a_ = ViTMAEForPreTraining(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ = model(lowercase__ )
a_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : Tuple ):
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( a__ , a__ , unittest.TestCase ):
_lowerCAmelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCAmelCase = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __magic_name__ ( self : str ):
a_ = ViTMAEModelTester(self )
a_ = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=3_7 )
def __magic_name__ ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : Union[str, Any] ):
pass
def __magic_name__ ( self : Dict ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def __magic_name__ ( self : Dict ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(lowercase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def __magic_name__ ( self : str ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def __magic_name__ ( self : Optional[int] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def __magic_name__ ( self : Union[str, Any] , lowercase__ : Tuple , lowercase__ : str , lowercase__ : Tuple ):
# make masks reproducible
np.random.seed(2 )
a_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
a_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
a_ = torch.from_numpy(lowercase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
a_ = pt_noise
super().check_pt_tf_models(lowercase__ , lowercase__ , lowercase__ )
def __magic_name__ ( self : Any ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
a_ = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
a_ = outputs[0].cpu().numpy()
a_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase__ )
a_ = model_class.from_pretrained(lowercase__ )
model.to(lowercase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
a_ = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
# Make sure we don't have nans
a_ = after_outputs[0].cpu().numpy()
a_ = 0
a_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase__ , 1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __magic_name__ ( self : Optional[Any] ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __magic_name__ ( self : List[str] ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __magic_name__ ( self : List[Any] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : Optional[Any] ):
pass
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = ViTMAEModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def UpperCAmelCase__ ( ):
"""simple docstring"""
a_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Union[str, Any] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : Optional[Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
a_ = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(lowercase__ )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
a_ = ViTMAEConfig()
a_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
a_ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ , noise=torch.from_numpy(lowercase__ ).to(device=lowercase__ ) )
# verify the logits
a_ = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , lowercase__ )
a_ = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowercase__ ) , atol=1e-4 ) )
| 704
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
UpperCamelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase__ = {
'''unc-nlp/lxmert-base-uncased''': 512,
}
UpperCamelCase__ = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class __lowercase ( a__ ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
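# A small illustration (separate from the class above) of the segment-id layout that
# create_token_type_ids_from_sequences produces for a sentence pair: segment 0 covers
# [CLS] + sentence A + [SEP], segment 1 covers sentence B + [SEP]. The helper below is
# purely for demonstration and is not part of the tokenizer API.
def example_token_type_ids(len_a: int, len_b: int) -> list:
    return (1 + len_a + 1) * [0] + (len_b + 1) * [1]


assert example_token_type_ids(2, 3) == [0, 0, 0, 0, 1, 1, 1, 1]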
| 143
| 0
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Print every subsequence of `sequence` via a depth-first state-space tree."""
    if index == len(sequence):
        print(current_subsequence)
        return
    # branch 1: skip the element at `index`
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: include the element at `index`
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
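# For a 4-element input the backtracking above prints all 2**4 = 16 subsequences,
# starting with the empty one. As a quick cross-check (illustrative only), the same
# count can be obtained with itertools.combinations:
from itertools import combinations


def all_subsequences(seq: list) -> list:
    return [list(c) for r in range(len(seq) + 1) for c in combinations(seq, r)]


assert len(all_subsequences([3, 1, 2, 4])) == 2**4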
| 659
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
_lowercase = True
from torch.cuda.amp import autocast
_lowercase = logging.getLogger(__name__)
@dataclass
class __snake_case :
"""simple docstring"""
UpperCamelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'Whether to log verbose messages or not.'} , )
UpperCamelCase_ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
UpperCamelCase_ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
UpperCamelCase_ = field(
default=0.99_99_95 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def UpperCamelCase ( snake_case__ , snake_case__):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
lowerCAmelCase_ : str = logging.WARNING
if model_args.verbose_logging:
lowerCAmelCase_ : int = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank):
lowerCAmelCase_ : Any = logging.INFO
logger.setLevel(snake_case__)
@dataclass
class __snake_case :
"""simple docstring"""
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
UpperCamelCase_ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
UpperCamelCase_ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
UpperCamelCase_ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
UpperCamelCase_ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
UpperCamelCase_ = field(
default=snake_case__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
UpperCamelCase_ = field(
default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class __snake_case :
"""simple docstring"""
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = "longest"
UpperCamelCase_ = None
UpperCamelCase_ = None
def __call__( self : str ,lowerCAmelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self.feature_extractor.pad(
lowerCAmelCase__ ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="pt" ,)
lowerCAmelCase_ : Union[str, Any] = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
lowerCAmelCase_ : List[str] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowerCAmelCase_ : Tuple = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
lowerCAmelCase_ : Optional[Any] = torch.zeros(
(batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["input_values"].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
lowerCAmelCase_ : Tuple = 1
lowerCAmelCase_ : int = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowerCAmelCase_ : str = _compute_mask_indices(
(batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=lowerCAmelCase__ ,min_masks=2 ,)
return batch
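# The flip/cumsum/flip trick above converts per-example output lengths into a boolean
# attention mask (True up to each length, False afterwards). A tiny standalone sketch
# of the same idea, outside the collator, with an illustrative helper name:
import torch


def lengths_to_mask(lengths: torch.Tensor, max_len: int) -> torch.Tensor:
    mask = torch.zeros((lengths.shape[0], max_len), dtype=torch.long)
    mask[(torch.arange(lengths.shape[0]), lengths - 1)] = 1  # mark the last valid position
    return mask.flip([-1]).cumsum(-1).flip([-1]).bool()


assert lengths_to_mask(torch.tensor([2, 4]), 5).tolist() == [
    [True, True, False, False, False],
    [True, True, True, True, False],
]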
class __snake_case ( snake_case__ ):
"""simple docstring"""
def __init__( self : List[str] ,*lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Tuple=1 ,lowerCAmelCase__ : Optional[int]=0 ,lowerCAmelCase__ : Optional[Any]=1.0 ,**lowerCAmelCase__ : Any ) -> str:
'''simple docstring'''
super().__init__(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : int = max_gumbel_temp
lowerCAmelCase_ : Union[str, Any] = min_gumbel_temp
lowerCAmelCase_ : str = gumbel_temp_decay
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : nn.Module ,lowerCAmelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
'''simple docstring'''
model.train()
lowerCAmelCase_ : str = self._prepare_inputs(lowerCAmelCase__ )
if self.use_amp:
with autocast():
lowerCAmelCase_ : List[Any] = self.compute_loss(lowerCAmelCase__ ,lowerCAmelCase__ )
else:
lowerCAmelCase_ : List[Any] = self.compute_loss(lowerCAmelCase__ ,lowerCAmelCase__ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowerCAmelCase_ : List[Any] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCAmelCase_ : Optional[Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
lowerCAmelCase_ : int = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase__ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase__ ,self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase__ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
return loss.detach()
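# The trainer above decays the Gumbel-softmax temperature geometrically on every update
# and clamps it at a floor. A standalone sketch of that schedule (defaults taken from
# the ModelArguments above; the function name is illustrative):
def gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    return max(max_temp * decay**step, min_temp)


assert gumbel_temperature(0) == 2.0  # starts at the maximum temperature
assert gumbel_temperature(1_000_000) == 0.5  # eventually clamped at the minimum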
def UpperCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = parser.parse_args_into_dataclasses()
configure_logger(snake_case__ , snake_case__)
# Downloading and loading a dataset from the hub.
lowerCAmelCase_ : List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
lowerCAmelCase_ : Any = DatasetDict()
lowerCAmelCase_ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
lowerCAmelCase_ : Union[str, Any] = DatasetDict()
lowerCAmelCase_ : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowerCAmelCase_ : Dict = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=snake_case__)
def prepare_dataset(snake_case__):
# check that all files have the correct sampling rate
lowerCAmelCase_ , lowerCAmelCase_ : str = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate)
return batch
# load audio files into numpy arrays
lowerCAmelCase_ : int = datasets.map(
snake_case__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names)
# filter audio files that are too long
lowerCAmelCase_ : int = vectorized_datasets.filter(
lambda snake_case__: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))
def normalize(snake_case__):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate)
# normalize and transform to `BatchFeatures`
lowerCAmelCase_ : str = vectorized_datasets.map(
snake_case__ , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowerCAmelCase_ : Optional[Any] = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'")
lowerCAmelCase_ : Dict = WavaVecaForPreTraining(snake_case__)
lowerCAmelCase_ : int = DataCollatorForWavaVecaPretraining(model=snake_case__ , feature_extractor=snake_case__)
lowerCAmelCase_ : List[Any] = WavaVecaPreTrainer(
model=snake_case__ , data_collator=snake_case__ , args=snake_case__ , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=snake_case__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 659
| 1
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # sort by the supplied key (e.g. value, weight or value/weight ratio), best first
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
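# A hypothetical use of the helpers above: build a menu and greedily fill a 100-calorie
# budget by value density (all names and numbers below are illustrative):
if __name__ == "__main__":
    foods = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 60], [40, 60, 40])
    taken, total_value = greedy(foods, 100, Things.value_weight)
    print(taken, total_value)  # picks Burger (80/40) and Pizza (100/60) for a total value of 180.0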
| 332
|
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCamelCase : Union[str, Any] ='\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_UpperCamelCase : List[str] ='\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_UpperCamelCase : str ='\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def a__ (__lowercase :List[Any] , __lowercase :List[Any] ) -> Union[str, Any]:
return float((preds == labels).mean() )
def a__ (__lowercase :Tuple , __lowercase :List[Any] , __lowercase :Union[str, Any]="binary" ) -> Optional[Any]:
_A : Union[str, Any] = simple_accuracy(__lowercase , __lowercase )
_A : str = float(fa_score(y_true=__lowercase , y_pred=__lowercase , average=__lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a__ (__lowercase :List[str] , __lowercase :Optional[Any] ) -> List[str]:
_A : str = {}
for id_pred, label in zip(__lowercase , __lowercase ):
_A : Optional[int] = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_A : Tuple = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_A : Union[str, Any] = [(pred, label)]
_A , _A : List[Any] = [], []
for question, preds_labels in question_map.items():
_A , _A : List[str] = zip(*__lowercase )
_A : Union[str, Any] = fa_score(y_true=__lowercase , y_pred=__lowercase , average='''macro''' )
fas.append(__lowercase )
_A : Optional[Any] = int(sum(pred == label for pred, label in preds_labels ) == len(__lowercase ) )
ems.append(__lowercase )
_A : Optional[int] = float(sum(__lowercase ) / len(__lowercase ) )
_A : Dict = sum(__lowercase ) / len(__lowercase )
_A : List[Any] = float(fa_score(y_true=__lowercase , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def A__ ( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None ,)
def A__ ( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def A__ ( self ,A__ ,A__ ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(A__ ,A__ )}
elif self.config_name == "cb":
return acc_and_fa(A__ ,A__ ,fa_avg='''macro''' )
elif self.config_name == "record":
_A : Any = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_A : int = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(A__ ,A__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(A__ ,A__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(A__ ,A__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 332
| 1
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _lowerCamelCase ( __A : str , __A : Tuple=() , __A : List[str]=None , __A : Dict="no" , __A : Any="29500" ) -> Any:
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : List[str] = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
_UpperCAmelCase : Optional[int] = True
elif "IPython" in sys.modules:
_UpperCAmelCase : Union[str, Any] = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
_UpperCAmelCase : int = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __A ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
_UpperCAmelCase : List[Any] = 8
_UpperCAmelCase : List[Any] = PrepareForLaunch(__A , distributed_type='''TPU''' )
print(f'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__A , args=__A , nprocs=__A , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*__A )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__A , master_addr='''127.0.01''' , master_port=__A , mixed_precision=__A ):
_UpperCAmelCase : str = PrepareForLaunch(__A , distributed_type='''MULTI_GPU''' )
print(f'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__A , args=__A , nprocs=__A , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
_UpperCAmelCase : Dict = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*__A )
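# The function above mirrors Accelerate's `notebook_launcher`; a typical notebook call
# looks like the sketch below (the training function and its arguments are illustrative,
# not taken from this file):
#
#     def training_loop(mixed_precision="fp16"):
#         ...  # build the Accelerator and train here
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)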
def _lowerCamelCase ( __A : Any , __A : Union[str, Any]=() , __A : int=2 ) -> Any:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__A , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
_UpperCAmelCase : List[str] = PrepareForLaunch(__A , debug=__A )
start_processes(__A , args=__A , nprocs=__A , start_method='''fork''' )
| 485
|
def bead_sort(sequence: list) -> list:
    """Bead sort (gravity sort) for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # transfer the surplus beads from the upper rod to the lower rod
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 485
| 1
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post a plain-text message to a Slack incoming webhook."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 531
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase__ ( self ):
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def UpperCamelCase__ ( self ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ = '''A red cat sitting on a park bench'''
snake_case_ = np.random.RandomState(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type='''np''' , )
snake_case_ = output.images
snake_case_ = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case_ = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase__ ( self ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ = '''A red cat sitting on a park bench'''
snake_case_ = np.random.RandomState(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''np''' , )
snake_case_ = output.images
snake_case_ = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case_ = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 531
| 1
|
"""simple docstring"""
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
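# Worked example for solve() above, evaluating left to right: for "5 6 9 * +",
# "*" pops 9 and 6 and pushes 54, then "+" pops 54 and 5 and pushes 59, so
# solve("5 6 9 * +".split(" ")) prints the trace table and returns 59.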
| 530
|
"""simple docstring"""
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by deleting exactly one digit of `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
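# Worked example for remove_digit() above: deleting one digit of 1253 yields
# 253, 153, 123 or 125, and the largest of these is 253.
assert remove_digit(1253) == 253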
| 530
| 1
|
import argparse
import os
import re
__A : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
__A : Any = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
__A : Optional[int] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__A : int = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
__A : Tuple = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__A : int = re.compile(r'\[([^\]]+)\]')
def __a ( A__ : List[str] ):
SCREAMING_SNAKE_CASE = _re_indent.search(A__ )
return "" if search is None else search.groups()[0]
def __a ( A__ : str , A__ : Dict="" , A__ : List[str]=None , A__ : Tuple=None ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
SCREAMING_SNAKE_CASE = ["\n".join(lines[:index] )]
else:
SCREAMING_SNAKE_CASE = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
SCREAMING_SNAKE_CASE = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(A__ ) )
if index < len(A__ ) - 1:
SCREAMING_SNAKE_CASE = [lines[index + 1]]
index += 1
else:
SCREAMING_SNAKE_CASE = []
else:
blocks.append("\n".join(A__ ) )
SCREAMING_SNAKE_CASE = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append("\n".join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    # Wrap `key` so that sorting ignores case and underscores.
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
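# For reference, the ordering implemented above puts constants first, then classes,
# then functions, each group sorted case-insensitively and ignoring underscores, e.g.
# (illustrative names):
#   ["load_tf_weights", "BertModel", "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig"]
#   -> ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertModel", "load_tf_weights"]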
def __a ( A__ : int ):
# This inner function sort imports between [ ].
def _replace(A__ : Tuple ):
SCREAMING_SNAKE_CASE = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
SCREAMING_SNAKE_CASE = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
SCREAMING_SNAKE_CASE = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]"
SCREAMING_SNAKE_CASE = import_statement.split("\n" )
if len(A__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
SCREAMING_SNAKE_CASE = 2 if lines[1].strip() == "[" else 1
SCREAMING_SNAKE_CASE = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
SCREAMING_SNAKE_CASE = sort_objects(A__ , key=lambda A__ : x[1] )
SCREAMING_SNAKE_CASE = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(A__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
SCREAMING_SNAKE_CASE = _re_bracket_content.sub(_replace , lines[1] )
else:
SCREAMING_SNAKE_CASE = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
SCREAMING_SNAKE_CASE = keys[:-1]
SCREAMING_SNAKE_CASE = get_indent(lines[1] ) + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] )
return "\n".join(A__ )
else:
# Finally we have to deal with imports fitting on one line
SCREAMING_SNAKE_CASE = _re_bracket_content.sub(_replace , A__ )
return import_statement
def __a ( A__ : Optional[Any] , A__ : str=True ):
with open(A__ , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
SCREAMING_SNAKE_CASE = split_code_in_indented_blocks(
A__ , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(A__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
SCREAMING_SNAKE_CASE = main_blocks[block_idx]
SCREAMING_SNAKE_CASE = block.split("\n" )
# Get to the start of the imports.
SCREAMING_SNAKE_CASE = 0
while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
SCREAMING_SNAKE_CASE = len(A__ )
else:
line_idx += 1
if line_idx >= len(A__ ):
continue
# Ignore beginning and last line: they don't contain anything.
SCREAMING_SNAKE_CASE = "\n".join(block_lines[line_idx:-1] )
SCREAMING_SNAKE_CASE = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
SCREAMING_SNAKE_CASE = split_code_in_indented_blocks(A__ , indent_level=A__ )
# We have two categories of import key: list or _import_structure[key].append/extend
SCREAMING_SNAKE_CASE = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
SCREAMING_SNAKE_CASE = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
SCREAMING_SNAKE_CASE = [(i, key) for i, key in enumerate(A__ ) if key is not None]
SCREAMING_SNAKE_CASE = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = []
for i in range(len(A__ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
SCREAMING_SNAKE_CASE = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(A__ )
count += 1
# And we put our main block back together with its first and last line.
SCREAMING_SNAKE_CASE = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(A__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(A__ , "w" , encoding="utf-8" ) as f:
f.write("\n".join(A__ ) )
def __a ( A__ : Optional[Any]=True ):
SCREAMING_SNAKE_CASE = []
for root, _, files in os.walk(A__ ):
if "__init__.py" in files:
SCREAMING_SNAKE_CASE = sort_imports(os.path.join(A__ , "__init__.py" ) , check_only=A__ )
if result:
SCREAMING_SNAKE_CASE = [os.path.join(A__ , "__init__.py" )]
if len(A__ ) > 0:
raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__A : Dict = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 698
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@slow
@require_torch
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" )
SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size
SCREAMING_SNAKE_CASE = tokenizer.sep_token_id
SCREAMING_SNAKE_CASE = tokenizer.cls_token_id
SCREAMING_SNAKE_CASE = 128
SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) )
SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) )
SCREAMING_SNAKE_CASE = 4
def _map_to_encoder_decoder_inputs(__lowerCamelCase : str ):
# Tokenizer will automatically set [BOS] <text> [EOS]
SCREAMING_SNAKE_CASE = tokenizer(batch["article"] , padding="max_length" , truncation=__lowerCamelCase , max_length=512 )
SCREAMING_SNAKE_CASE = tokenizer(batch["highlights"] , padding="max_length" , truncation=__lowerCamelCase , max_length=128 )
SCREAMING_SNAKE_CASE = inputs.input_ids
SCREAMING_SNAKE_CASE = inputs.attention_mask
SCREAMING_SNAKE_CASE = outputs.input_ids
SCREAMING_SNAKE_CASE = outputs.input_ids.copy()
SCREAMING_SNAKE_CASE = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
SCREAMING_SNAKE_CASE = outputs.attention_mask
assert all(len(__lowerCamelCase ) == 512 for x in inputs.input_ids )
assert all(len(__lowerCamelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(__lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = pred.label_ids
SCREAMING_SNAKE_CASE = pred.predictions
# all unnecessary tokens are removed
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCamelCase ) )] ) / len(__lowerCamelCase )
return {"accuracy": accuracy}
# map train dataset
SCREAMING_SNAKE_CASE = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
SCREAMING_SNAKE_CASE = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments(
output_dir=__lowerCamelCase , per_device_train_batch_size=__lowerCamelCase , per_device_eval_batch_size=__lowerCamelCase , predict_with_generate=__lowerCamelCase , evaluation_strategy="steps" , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=__lowerCamelCase , args=__lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , tokenizer=__lowerCamelCase , )
# start training
trainer.train()
| 698
| 1
|
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return n with n % n1 == r1 and n % n2 == r2 (n1 and n2 coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, built from modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
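# Worked example for the two CRT implementations above: the unique n modulo 5 * 7 = 35
# with n % 5 == 1 and n % 7 == 3 is 31, and both functions agree on it.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31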
| 151
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ="""hf-internal-testing/tiny-random-t5"""
SCREAMING_SNAKE_CASE__ =AutoTokenizer.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =tokenizer("""This is me""" ,return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ =model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
SCREAMING_SNAKE_CASE__ =model.generate(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
SCREAMING_SNAKE_CASE__ =model_reloaded.generate(**_UpperCamelCase )
self.assertTrue(torch.allclose(_UpperCamelCase ,_UpperCamelCase ) )
def __A ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ="""hf-internal-testing/tiny-random-t5"""
SCREAMING_SNAKE_CASE__ =AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_UpperCamelCase ):
model.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =model.reverse_bettertransformer()
model.save_pretrained(_UpperCamelCase )
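# The pattern exercised by the second test above: a model converted with
# to_bettertransformer() must be converted back before it can be serialized.
# Sketch (illustrative, mirroring the calls used in the tests):
#
#     model = model.to_bettertransformer()
#     ...  # run generation
#     model = model.reverse_bettertransformer()
#     model.save_pretrained(save_dir)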
| 151
| 1
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowercase__ : str = 1.5
lowercase__ : List[Any] = int(factor * num_class_images )
lowercase__ : int = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=UpperCAmelCase__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase__ : List[str] = client.query(text=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
lowercase__ : List[str] = int(factor * num_images )
lowercase__ : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
lowercase__ : List[str] = 0
lowercase__ : Dict = 0
lowercase__ : Tuple = tqdm(desc='''downloading real regularization images''' , total=UpperCAmelCase__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase__ : str = class_images[count]
count += 1
try:
lowercase__ : Union[str, Any] = requests.get(images['''url'''] )
if img.status_code == 200:
lowercase__ : List[str] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __UpperCamelCase ( ):
lowercase__ : Any = argparse.ArgumentParser('''''' , add_help=UpperCAmelCase__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=UpperCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
__a: Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 708
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a: Optional[int] = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Union[str, Any] = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: int = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: str = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__a: List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 428
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : str = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
| 650
|
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A_ : int =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the caller already passed {"image": ..., "question": ...} (or a list of such dicts)
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
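# Usage sketch (assumes this class is wired up as the "visual-question-answering" pipeline task):
#
#   from transformers import pipeline
#
#   vqa = pipeline("visual-question-answering")
#   vqa(image="http://images.cocodataset.org/val2017/000000039769.jpg", question="How many cats?", top_k=2)
#
# Each returned entry is a dict {"score": float, "answer": str}, matching postprocess() above.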
| 650
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output of the Roberta-series model, adding a projection over the hidden states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict,
        )
        if self.has_pre_transformation:
            # Project the second-to-last hidden state after a dedicated LayerNorm
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
| 329
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple ):
"""simple docstring"""
pass
def hashimage(image):
    # Stable MD5 fingerprint of the raw image bytes, used to compare pipeline outputs in tests
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : int = DepthEstimationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : Tuple = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , UpperCAmelCase_ )
import datasets
__UpperCAmelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
__UpperCAmelCase : Dict = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , UpperCAmelCase_ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "Intel/dpt-large"
__UpperCAmelCase : Optional[int] = pipeline("depth-estimation" , model=UpperCAmelCase_ )
__UpperCAmelCase : Any = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
__UpperCAmelCase : str = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 329
| 1
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , __a , __a , __a=1024 , __a=1024 , __a=3.6 ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = tokenizer
UpperCAmelCase__ = tokenizer.bos_token_id
UpperCAmelCase__ = dataset
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = seq_length * chars_per_token * num_of_sequences
def __iter__(self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = iter(self.dataset )
UpperCAmelCase__ = True
while more_examples:
UpperCAmelCase__ , UpperCAmelCase__ = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__a )['content'] )
buffer_len += len(buffer[-1] )
except StopIteration:
UpperCAmelCase__ = False
break
UpperCAmelCase__ = tokenizer(__a , truncation=__a )['input_ids']
UpperCAmelCase__ = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(__a ) , self.seq_length ):
UpperCAmelCase__ = all_token_ids[i : i + self.seq_length]
if len(__a ) == self.seq_length:
yield torch.tensor(__a )
def UpperCamelCase_( snake_case__: Optional[int] ) -> List[Any]:
UpperCAmelCase__ = {'streaming': True}
UpperCAmelCase__ = load_dataset(args.dataset_name , split='train' , **snake_case__ )
UpperCAmelCase__ = ConstantLengthDataset(snake_case__ , snake_case__ , seq_length=args.seq_length )
UpperCAmelCase__ = DataLoader(snake_case__ , batch_size=args.batch_size )
return eval_dataloader
def UpperCamelCase_( snake_case__: Any ) -> Dict:
model.eval()
UpperCAmelCase__ = []
for step, batch in enumerate(snake_case__ ):
with torch.no_grad():
UpperCAmelCase__ = model(snake_case__ , labels=snake_case__ )
UpperCAmelCase__ = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(snake_case__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
UpperCAmelCase__ = torch.mean(torch.cat(snake_case__ ) )
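    # Perplexity is exp(mean loss); fall back to infinity if exponentiating the mean loss overflows.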
try:
UpperCAmelCase__ = torch.exp(snake_case__ )
except OverflowError:
UpperCAmelCase__ = float('inf' )
return loss.item(), perplexity.item()
# Setup Accelerator
_UpperCamelCase = Accelerator()
# Parse configuration
_UpperCamelCase = HfArgumentParser(EvaluationArguments)
_UpperCamelCase = parser.parse_args()
set_seed(args.seed)
# Logging
_UpperCamelCase = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
_UpperCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_UpperCamelCase = create_dataloader(args)
# Prepare everything with our `accelerator`.
_UpperCamelCase , _UpperCamelCase = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
_UpperCamelCase , _UpperCamelCase = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 146
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BlenderbotSmallConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = """gelu"""
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = eos_token_id
UpperCAmelCase__ = pad_token_id
UpperCAmelCase__ = bos_token_id
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase__ = prepare_blenderbot_small_inputs_dict(__a , __a , __a )
return config, inputs_dict
def UpperCamelCase__ (self , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFBlenderbotSmallModel(config=__a ).get_decoder()
UpperCAmelCase__ = inputs_dict['input_ids']
UpperCAmelCase__ = input_ids[:1, :]
UpperCAmelCase__ = inputs_dict['attention_mask'][:1, :]
UpperCAmelCase__ = inputs_dict['head_mask']
UpperCAmelCase__ = 1
# first forward pass
UpperCAmelCase__ = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
UpperCAmelCase__ , UpperCAmelCase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase__ = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase__ = model(__a , attention_mask=__a )[0]
UpperCAmelCase__ = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1E-3 )
def UpperCamelCase_( snake_case__: Any , snake_case__: List[str] , snake_case__: Dict , snake_case__: Any=None , snake_case__: int=None , snake_case__: int=None , snake_case__: int=None , snake_case__: Optional[int]=None , ) -> int:
if attention_mask is None:
UpperCAmelCase__ = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__SCREAMING_SNAKE_CASE = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""conversational""": TFBlenderbotSmallForConditionalGeneration,
"""feature-extraction""": TFBlenderbotSmallModel,
"""summarization""": TFBlenderbotSmallForConditionalGeneration,
"""text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
"""translation""": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFBlenderbotSmallModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
@require_tokenizers
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
"""Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
""" i'm going to throw up.\nand why is that?"""
]
__SCREAMING_SNAKE_CASE = """facebook/blenderbot_small-90M"""
@cached_property
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer(self.src_text , return_tensors='tf' )
UpperCAmelCase__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , )
UpperCAmelCase__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 146
| 1
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 152
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152
| 1
|
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __snake_case( _lowerCAmelCase ) -> List[str]:
snake_case__ : Dict = []
snake_case__ : Tuple = []
snake_case__ : Optional[Any] = []
for rt in rc.restypes:
snake_case__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
snake_case__ : Optional[int] = {name: i for i, name in enumerate(UpperCamelCase__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
snake_case__ : Optional[int] = torch.tensor(
UpperCamelCase__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
snake_case__ : Dict = torch.tensor(
UpperCamelCase__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
snake_case__ : Optional[int] = torch.tensor(
UpperCamelCase__ , dtype=torch.floataa , device=protein["""aatype"""].device , )
snake_case__ : Optional[Any] = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
snake_case__ : Dict = restype_atomaa_to_atomaa[protein_aatype]
snake_case__ : Optional[Any] = restype_atomaa_mask[protein_aatype]
snake_case__ : Dict = residx_atomaa_mask
snake_case__ : str = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
snake_case__ : Tuple = restype_atomaa_to_atomaa[protein_aatype]
snake_case__ : Tuple = residx_atomaa_to_atomaa.long()
# create the corresponding mask
snake_case__ : Dict = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
snake_case__ : Optional[int] = rc.restype_atoa[restype_letter]
snake_case__ : Any = rc.residue_atoms[restype_name]
for atom_name in atom_names:
snake_case__ : Dict = rc.atom_order[atom_name]
snake_case__ : Tuple = 1
snake_case__ : List[str] = restype_atomaa_mask[protein_aatype]
snake_case__ : Optional[Any] = residx_atomaa_mask
return protein
def __snake_case( _lowerCAmelCase ) -> Tuple:
snake_case__ : str = tree_map(lambda _lowerCAmelCase : torch.tensor(UpperCamelCase__ , device=batch["""aatype"""].device ) , UpperCamelCase__ , np.ndarray )
snake_case__ : Any = tensor_tree_map(lambda _lowerCAmelCase : np.array(UpperCamelCase__ ) , make_atomaa_masks(UpperCamelCase__ ) )
return out
| 374
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
a_ : List[Any] = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
a_ : str = 1024
a_ : List[str] = 4096
a_ : int = 24
a_ : int = 16
a_ : Optional[Any] = [5, 11, 17, 23]
a_ : Optional[int] = [256, 512, 1024, 1024]
a_ : Union[str, Any] = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
a_ : Dict = 768
a_ : str = [1, 1, 1, 0.5]
a_ : Dict = [256, 512, 768, 768]
a_ : int = 150
a_ : Any = 16
a_ : Optional[int] = (1, 384, 384)
a_ : Optional[Any] = False
a_ : Dict = """project"""
if "ade" in checkpoint_url:
a_ : int = True
a_ : int = 768
a_ : Optional[int] = [1, 1, 1, 0.5]
a_ : Dict = 150
a_ : Any = 16
a_ : Optional[int] = """huggingface/label-files"""
a_ : Optional[int] = """ade20k-id2label.json"""
a_ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) ) , """r""" ) )
a_ : str = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
a_ : Optional[int] = idalabel
a_ : Optional[Any] = {v: k for k, v in idalabel.items()}
a_ : int = [1, 150, 480, 480]
return config, expected_shape
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
a_ : Dict = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : str ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
a_ : List[str] = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
a_ : Union[str, Any] = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
a_ : Optional[int] = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
a_ : List[str] = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
a_ : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
a_ : Dict = name.replace("""proj""" , """projection""" )
if "blocks" in name:
a_ : Union[str, Any] = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
a_ : Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
a_ : Any = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
a_ : List[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
a_ : int = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
a_ : Tuple = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
a_ : str = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
a_ : Optional[Any] = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
a_ : Dict = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
a_ : Tuple = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
a_ : str = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
a_ : List[Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
a_ : List[str] = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
a_ : Union[str, Any] = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
a_ : Any = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
a_ : List[str] = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
a_ : str = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
a_ : Dict = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
a_ : str = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
a_ : int = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
a_ : Optional[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
a_ : str = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
a_ : Optional[int] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
a_ : Optional[Any] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
a_ : Optional[Any] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
a_ : int = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
a_ : Any = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
a_ : int = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
a_ : Any = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
a_ : Union[str, Any] = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
a_ : Any = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
a_ : Any = name.replace("""..""" , """.""" )
if "stem.conv" in name:
a_ : Optional[int] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
a_ : Dict = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
a_ : str = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
a_ : Any = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
a_ : Dict = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
a_ : List[str] = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
a_ : int = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a_ : List[str] = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
a_ : Optional[Any] = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
a_ : Any = in_proj_weight[: config.hidden_size, :]
a_ : int = in_proj_bias[: config.hidden_size]
a_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a_ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a_ : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
a_ : str = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
a_ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
a_ : Union[str, Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
a_ , a_ : Optional[Any] = get_dpt_config(UpperCamelCase__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
a_ : int = torch.load(UpperCamelCase__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(UpperCamelCase__ )
# rename keys
for key in state_dict.copy().keys():
a_ : Any = state_dict.pop(UpperCamelCase__ )
a_ : Tuple = val
# read in qkv matrices
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
a_ : Any = DPTForSemanticSegmentation(UpperCamelCase__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# Check outputs on an image
a_ : str = 480 if """ade""" in checkpoint_url else 384
a_ : Optional[int] = DPTImageProcessor(size=UpperCamelCase__ )
a_ : Dict = prepare_img()
a_ : Any = image_processor(UpperCamelCase__ , return_tensors="""pt""" )
# forward pass
a_ : Dict = model(**UpperCamelCase__ ).logits if """ade""" in checkpoint_url else model(**UpperCamelCase__ ).predicted_depth
if show_prediction:
a_ : Any = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=UpperCamelCase__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
lowerCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 442
| 0
|
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Smallest cuboid size M for which the number of cuboids with an integer
    shortest surface path first exceeds ``limit``."""
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
| 704
|
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ):
super().__init__(
_A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , )
_UpperCamelCase = field
_UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths}
_UpperCamelCase = Json(
cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , )
def UpperCamelCase_ ( self : List[str] ):
# Build iterable dataset
if self.streaming:
_UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , )
_UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_UpperCamelCase = dataset
_UpperCamelCase = path_or_buf
_UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCamelCase = num_proc
_UpperCamelCase = '''utf-8'''
_UpperCamelCase = to_json_kwargs
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A )
_UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' )
_UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
_UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
_UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer:
_UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''' )
_UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
return written
def UpperCamelCase_ ( self : Any , _A : Optional[Any] ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args
_UpperCamelCase = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
_UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ):
_UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
_UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_A )
else:
_UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(_A )
return written
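# Usage sketch (hypothetical, assuming these classes mirror `datasets`' JSON reader/writer API):
#
#   ds = JsonDatasetReader("data.jsonl", split="train").read()   # JSON lines -> Dataset
#   JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()       # Dataset -> JSON lines
#
# (`JsonDatasetReader` / `JsonDatasetWriter` are assumed names for the two classes defined above.)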
| 71
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100) -> float:
    """Approximate the arc length of ``fnc`` between ``x_start`` and ``x_end`` using ``steps`` chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
    i = 1_0
while i <= 1_0_0_0_0_0:
print(F'''With {i} steps: {line_length(f, -1_0, 1_0, i)}''')
i *= 1_0
| 695
|
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
def __UpperCAmelCase ( self ):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 695
| 1
|
'''simple docstring'''
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def a_ ( _UpperCAmelCase : List[str]=None ) -> Optional[int]:
if subparsers is not None:
__snake_case : Tuple = subparsers.add_parser('test' )
else:
__snake_case : str = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' ,default=_UpperCAmelCase ,help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) ,)
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def a_ ( _UpperCAmelCase : List[str] ) -> List[str]:
__snake_case : str = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
__snake_case : int = script_name
else:
__snake_case : int = f'''--config_file={args.config_file} {script_name}'''
__snake_case : int = ['accelerate-launch'] + test_args.split()
__snake_case : List[str] = execute_subprocess_async(_UpperCAmelCase ,env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def a_ ( ) -> int:
__snake_case : Optional[Any] = test_command_parser()
__snake_case : int = parser.parse_args()
test_command(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 701
|
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 124
| 0
|
'''simple docstring'''
lowercase__ : List[str] = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised infix expression with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: pop an operator and two operands, apply, push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the final value on the operand stack is the result
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 98
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Any = 'T5Config'
def a__ ( lowercase : jnp.array, lowercase : int, lowercase : int ) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase = jnp.zeros_like(lowercase )
_UpperCamelCase = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
_UpperCamelCase = shifted_input_ids.at[:, 0].set(lowercase )
_UpperCamelCase = jnp.where(shifted_input_ids == -100, lowercase, lowercase )
return shifted_input_ids
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[Any] = 'mt5'
_snake_case : Union[str, Any] = MTaConfig
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Tuple = 'mt5'
_snake_case : int = MTaConfig
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[int] = 'mt5'
_snake_case : Optional[Any] = MTaConfig
| 98
| 1
|
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __a , unittest.TestCase ):
snake_case : Union[str, Any] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def snake_case_ (self , lowerCAmelCase__=0 ):
_UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(lowerCAmelCase__ ) )
_UpperCAmelCase : Optional[int] = np.random.RandomState(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.7_5,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = self.get_dummy_inputs()
_UpperCAmelCase : Union[str, Any] = pipe(**lowerCAmelCase__ ).images
_UpperCAmelCase : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_UpperCAmelCase : List[str] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def snake_case_ (self ):
_UpperCAmelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = self.get_dummy_inputs()
_UpperCAmelCase : List[str] = pipe(**lowerCAmelCase__ ).images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_UpperCAmelCase : Any = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case_ (self ):
_UpperCAmelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
# warmup pass to apply optimizations
_UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs() )
_UpperCAmelCase : Any = self.get_dummy_inputs()
_UpperCAmelCase : Dict = pipe(**lowerCAmelCase__ ).images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_UpperCAmelCase : str = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Dict = self.get_dummy_inputs()
_UpperCAmelCase : List[Any] = pipe(**lowerCAmelCase__ ).images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_UpperCAmelCase : Dict = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case_ (self ):
_UpperCAmelCase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_UpperCAmelCase : Union[str, Any] = pipe(**lowerCAmelCase__ ).images
_UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_UpperCAmelCase : Any = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case_ (self ):
_UpperCAmelCase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = self.get_dummy_inputs()
_UpperCAmelCase : str = pipe(**lowerCAmelCase__ ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_UpperCAmelCase : int = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
@property
def snake_case_ (self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case_ (self ):
_UpperCAmelCase : str = ort.SessionOptions()
_UpperCAmelCase : Optional[int] = False
return options
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_UpperCAmelCase : Dict = init_image.resize((7_6_8, 5_1_2) )
# using the PNDM scheduler by default
_UpperCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Any = """A fantasy landscape, trending on artstation"""
_UpperCAmelCase : Optional[int] = np.random.RandomState(0 )
_UpperCAmelCase : Tuple = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCAmelCase__ , output_type="""np""" , )
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : Optional[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def snake_case_ (self ):
_UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_UpperCAmelCase : Optional[int] = init_image.resize((7_6_8, 5_1_2) )
_UpperCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
_UpperCAmelCase : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : int = """A fantasy landscape, trending on artstation"""
_UpperCAmelCase : Optional[Any] = np.random.RandomState(0 )
_UpperCAmelCase : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowerCAmelCase__ , output_type="""np""" , )
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
_UpperCAmelCase : List[Any] = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
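# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the test suite above). It mirrors what the
# nightly tests exercise. Assumptions: the class imported here corresponds to diffusers'
# `OnnxStableDiffusionImg2ImgPipeline`, the "runwayml/stable-diffusion-v1-5" ONNX revision
# is reachable, and `_example_onnx_img2img` is a made-up helper name.
def _example_onnx_img2img():
    import numpy as np
    from diffusers import OnnxStableDiffusionImg2ImgPipeline
    from diffusers.utils.testing_utils import load_image

    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((768, 512))
    pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
    )
    output = pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=init_image,
        strength=0.75,
        guidance_scale=7.5,
        num_inference_steps=10,
        generator=np.random.RandomState(0),
        output_type="np",
    )
    return output.images  # expected shape: (1, 512, 768, 3)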
| 720
|
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCAmelCase ( __a ):
@slow
@require_torch
def snake_case_ (self ):
_UpperCAmelCase : Dict = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
_UpperCAmelCase : Tuple = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[str] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : List[str] = tokenizer.sep_token_id
_UpperCAmelCase : List[Any] = tokenizer.cls_token_id
_UpperCAmelCase : List[str] = 1_2_8
_UpperCAmelCase : int = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
_UpperCAmelCase : Tuple = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
_UpperCAmelCase : str = train_dataset.select(range(3_2 ) )
_UpperCAmelCase : str = val_dataset.select(range(1_6 ) )
_UpperCAmelCase : Any = 4
def _map_to_encoder_decoder_inputs(lowerCAmelCase__ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase : List[str] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=lowerCAmelCase__ , max_length=5_1_2 )
_UpperCAmelCase : Union[str, Any] = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=lowerCAmelCase__ , max_length=1_2_8 )
_UpperCAmelCase : List[Any] = inputs.input_ids
_UpperCAmelCase : int = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : List[Any] = outputs.input_ids.copy()
_UpperCAmelCase : str = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Tuple = outputs.attention_mask
assert all(len(lowerCAmelCase__ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(lowerCAmelCase__ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = pred.label_ids
_UpperCAmelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase : Optional[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase__ ) )] ) / len(lowerCAmelCase__ )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : int = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
_UpperCAmelCase : Tuple = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
_UpperCAmelCase : Any = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : int = SeqaSeqTrainingArguments(
output_dir=lowerCAmelCase__ , per_device_train_batch_size=lowerCAmelCase__ , per_device_eval_batch_size=lowerCAmelCase__ , predict_with_generate=lowerCAmelCase__ , evaluation_strategy="""steps""" , do_train=lowerCAmelCase__ , do_eval=lowerCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCAmelCase : Any = SeqaSeqTrainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
# start training
trainer.train()
| 156
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = 0
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : int = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : List[Any] = Path(_lowercase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Path(_lowercase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Any = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int = CLIPConfig()
# Create a dummy config file with image_proceesor_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Path(_lowercase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE__ : str = AutoImageProcessor.from_pretrained(_lowercase ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE__ : List[str] = CLIPImageProcessor(**_lowercase )
# save in new folder
model_config.save_pretrained(_lowercase )
config.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoImageProcessor.from_pretrained(_lowercase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE__ : List[str] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] = Path(_lowercase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
_lowercase , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained('''clip-base''' )
def lowercase__ ( self : str ):
with self.assertRaisesRegex(
_lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoImageProcessor.from_pretrained(_lowercase , revision='''aaaaaa''' )
def lowercase__ ( self : List[str] ):
with self.assertRaisesRegex(
_lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase__ ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = AutoImageProcessor.from_pretrained(_lowercase , trust_remote_code=_lowercase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def lowercase__ ( self : List[str] ):
try:
AutoConfig.register('''custom''' , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoImageProcessor.register(_lowercase , _lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : int = Path(_lowercase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomImageProcessor.from_pretrained(_lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : Optional[Any] ):
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = True
try:
AutoConfig.register('''custom''' , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ : Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_lowercase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 35
|
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers( max_number : int ):
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( base : int = 800800 , degree : int = 800800 ):
    """simple docstring"""
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
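# ---------------------------------------------------------------------------
# Why the log condition works (illustrative cross-check, not part of the solution above).
# A hybrid-integer is p**q * q**p for distinct primes p < q, and it is <= base**degree
# exactly when q*log2(p) + p*log2(q) <= degree*log2(base), which is the inequality the
# two-pointer loop in `solution` evaluates. `_brute_force_hybrid_count` is a made-up
# helper for checking tiny inputs only; it is far too slow for the real limits.
def _brute_force_hybrid_count(base: int, degree: int) -> int:
    from math import log2

    limit = base**degree
    primes = calculate_prime_numbers(int(degree * log2(base)) + 1)
    count = 0
    for i, p in enumerate(primes):
        for q in primes[i + 1 :]:
            if p**q * q**p <= limit:
                count += 1
    return count
# e.g. _brute_force_hybrid_count(10, 5) and solution(10, 5) should give the same count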
| 88
| 0
|
from __future__ import annotations
def __UpperCAmelCase( electron_conc , hole_conc , intrinsic_conc , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
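# ---------------------------------------------------------------------------
# Quick usage check (illustrative). The function above solves the mass-action law
# n * p = n_i**2 for whichever of the three concentrations is passed as 0; the example
# values below are arbitrary and chosen only so the arithmetic is easy to follow.
def _example_mass_action():
    # hole concentration missing: p = n_i**2 / n = 36 / 4 = 9
    assert __UpperCAmelCase(4, 0, 6) == ("hole_conc", 9.0)
    # electron concentration missing: n = n_i**2 / p = 36 / 9 = 4
    assert __UpperCAmelCase(0, 9, 6) == ("electron_conc", 4.0)
    # intrinsic concentration missing: n_i = sqrt(n * p) = sqrt(4 * 9) = 6
    assert __UpperCAmelCase(4, 9, 0) == ("intrinsic_conc", 6.0)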
| 613
|
import pytest
import datasets
# Import fixture modules as plugins
_lowerCamelCase = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def __UpperCAmelCase( lowercase_ , lowercase_ ):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __UpperCAmelCase( lowercase_ ):
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __UpperCAmelCase( lowercase_ , lowercase_ ):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
_lowerCamelCase : Optional[Any] = tmp_path_factory.getbasetemp() / '''cache'''
_lowerCamelCase : Optional[Any] = test_hf_cache_home / '''datasets'''
_lowerCamelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
_lowerCamelCase : Dict = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
_lowerCamelCase : str = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
_lowerCamelCase : Optional[int] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __UpperCAmelCase( ):
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __UpperCAmelCase( lowercase_ ):
# don't take tests into account when counting downloads
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __UpperCAmelCase( lowercase_ ):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 613
| 1
|
'''simple docstring'''
def and_gate(input_a: int, input_b: int) -> int:
    # AND gate: 1 only when both inputs are 1
    return int((input_a, input_b).count(0) == 0)
def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
    test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 286
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase : Optional[Any] = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
UpperCAmelCase : Any = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
UpperCAmelCase : Union[str, Any] = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def _A ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
def remove_articles(SCREAMING_SNAKE_CASE : Optional[Any] ):
a__ : Any =re.compile(r"\b(a|an|the)\b" , re.UNICODE )
return re.sub(SCREAMING_SNAKE_CASE , " " , SCREAMING_SNAKE_CASE )
def white_space_fix(SCREAMING_SNAKE_CASE : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE : List[Any] ):
a__ : List[Any] =set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE : Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE ) ) ) )
def _A ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
return int(normalize_answer(SCREAMING_SNAKE_CASE ) == normalize_answer(SCREAMING_SNAKE_CASE ) )
def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
a__ : Any =[any(compute_exact(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
return (sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE )) * 100
def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
a__ : str =[rgram for rgrams in rgramslist for rgram in rgrams]
a__ : List[str] =Counter(SCREAMING_SNAKE_CASE )
a__ : List[Any] =Counter(SCREAMING_SNAKE_CASE )
a__ : Any =Counter()
for sgram, scount in sgramcounter.items():
a__ : List[str] =scount * numref
a__ : List[str] =Counter(SCREAMING_SNAKE_CASE )
a__ : Optional[Any] =Counter()
for cgram, ccount in cgramcounter.items():
a__ : int =ccount * numref
# KEEP
a__ : Any =sgramcounter_rep & cgramcounter_rep
a__ : List[str] =keepgramcounter_rep & rgramcounter
a__ : str =sgramcounter_rep & rgramcounter
a__ : Optional[int] =0
a__ : List[Any] =0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a__ : Tuple =1
a__ : Dict =1
if len(SCREAMING_SNAKE_CASE ) > 0:
a__ : List[str] =keeptmpscorea / len(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
a__ : int =keeptmpscorea / sum(keepgramcounterall_rep.values() )
a__ : Tuple =0
if keepscore_precision > 0 or keepscore_recall > 0:
a__ : Any =2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
a__ : Optional[Any] =sgramcounter_rep - cgramcounter_rep
a__ : Optional[Any] =delgramcounter_rep - rgramcounter
a__ : Optional[int] =sgramcounter_rep - rgramcounter
a__ : int =0
a__ : Dict =0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a__ : Any =1
if len(SCREAMING_SNAKE_CASE ) > 0:
a__ : Optional[Any] =deltmpscorea / len(SCREAMING_SNAKE_CASE )
# ADDITION
a__ : Union[str, Any] =set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
a__ : List[Any] =set(SCREAMING_SNAKE_CASE ) & set(SCREAMING_SNAKE_CASE )
a__ : Tuple =set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
a__ : Any =0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a__ : int =1
a__ : Dict =1
if len(SCREAMING_SNAKE_CASE ) > 0:
a__ : Optional[int] =addtmpscore / len(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
a__ : List[str] =addtmpscore / len(SCREAMING_SNAKE_CASE )
a__ : List[str] =0
if addscore_precision > 0 or addscore_recall > 0:
a__ : Optional[Any] =2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def _A ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
a__ : int =len(SCREAMING_SNAKE_CASE )
a__ : Tuple =ssent.split(" " )
a__ : str =csent.split(" " )
a__ : List[Any] =[]
a__ : int =[]
a__ : List[Any] =[]
a__ : Any =[]
a__ : List[Any] =[]
a__ : Any =[]
a__ : Union[str, Any] =[]
a__ : Union[str, Any] =[]
a__ : Union[str, Any] =[]
a__ : Tuple =[]
for rsent in rsents:
a__ : Optional[int] =rsent.split(" " )
a__ : Tuple =[]
a__ : Tuple =[]
a__ : int =[]
ragramslist.append(SCREAMING_SNAKE_CASE )
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE ) - 1:
a__ : Union[str, Any] =ragrams[i] + " " + ragrams[i + 1]
ragrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 2:
a__ : Optional[int] =ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
ragrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 3:
a__ : List[Any] =ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
ragrams.append(SCREAMING_SNAKE_CASE )
ragramslist.append(SCREAMING_SNAKE_CASE )
ragramslist.append(SCREAMING_SNAKE_CASE )
ragramslist.append(SCREAMING_SNAKE_CASE )
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE ) - 1:
a__ : str =sagrams[i] + " " + sagrams[i + 1]
sagrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 2:
a__ : Dict =sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
sagrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 3:
a__ : Any =sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
sagrams.append(SCREAMING_SNAKE_CASE )
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE ) - 1:
a__ : List[Any] =cagrams[i] + " " + cagrams[i + 1]
cagrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 2:
a__ : List[str] =cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
cagrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 3:
a__ : Optional[int] =cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
cagrams.append(SCREAMING_SNAKE_CASE )
((a__) , (a__) , (a__)) : Optional[Any] =SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
((a__) , (a__) , (a__)) : Dict =SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
((a__) , (a__) , (a__)) : List[str] =SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
((a__) , (a__) , (a__)) : Dict =SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ : Tuple =sum([keepascore, keepascore, keepascore, keepascore] ) / 4
a__ : Tuple =sum([delascore, delascore, delascore, delascore] ) / 4
a__ : int =sum([addascore, addascore, addascore, addascore] ) / 4
a__ : Optional[int] =(avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def _A ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "13a" , SCREAMING_SNAKE_CASE : bool = True ):
"""simple docstring"""
if lowercase:
a__ : Optional[int] =sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
a__ : Any =sacrebleu.metrics.bleu._get_tokenizer(SCREAMING_SNAKE_CASE )()(SCREAMING_SNAKE_CASE )
else:
a__ : Any =sacrebleu.TOKENIZERS[tokenizer]()(SCREAMING_SNAKE_CASE )
elif tokenizer == "moses":
a__ : Dict =sacremoses.MosesTokenizer().tokenize(SCREAMING_SNAKE_CASE , return_str=SCREAMING_SNAKE_CASE , escape=SCREAMING_SNAKE_CASE )
elif tokenizer == "penn":
a__ : Optional[int] =sacremoses.MosesTokenizer().penn_tokenize(SCREAMING_SNAKE_CASE , return_str=SCREAMING_SNAKE_CASE )
else:
a__ : Dict =sentence
if not return_str:
a__ : List[Any] =normalized_sent.split()
return normalized_sent
def _A ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
if not (len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )):
raise ValueError("Sources length must match predictions and references lengths." )
a__ : Dict =0
for src, pred, refs in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
sari_score += SARIsent(normalize(SCREAMING_SNAKE_CASE ) , normalize(SCREAMING_SNAKE_CASE ) , [normalize(SCREAMING_SNAKE_CASE ) for sent in refs] )
a__ : Tuple =sari_score / len(SCREAMING_SNAKE_CASE )
return 100 * sari_score
def _A ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple="exp" , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=False , ):
"""simple docstring"""
a__ : int =len(references[0] )
if any(len(SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
a__ : List[str] =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE )]
a__ : Optional[int] =sacrebleu.corpus_bleu(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , smooth_method=SCREAMING_SNAKE_CASE , smooth_value=SCREAMING_SNAKE_CASE , force=SCREAMING_SNAKE_CASE , lowercase=SCREAMING_SNAKE_CASE , use_effective_order=SCREAMING_SNAKE_CASE , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __lowerCAmelCase ( datasets.Metric):
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] ={}
result.update({"sari": compute_sari(sources=lowerCAmelCase__ , predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )} )
result.update({"sacrebleu": compute_sacrebleu(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )} )
result.update({"exact": compute_em(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )} )
return result
| 563
| 0
|
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __UpperCAmelCase ( a_):
    a_ = re.sub('<n>' , '' , a_)  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(a_))
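# ---------------------------------------------------------------------------
# Usage sketch (illustrative): the helper above strips the pegasus "<n>" marker and puts
# each detected sentence on its own line. It needs nltk's "punkt" data, which the block
# above downloads when nltk is installed; `_example_sentence_split` is a made-up name.
def _example_sentence_split():
    text = "Pegasus is mythical. <n>It was a winged horse."
    # expected (roughly): "Pegasus is mythical.\nIt was a winged horse."
    return __UpperCAmelCase(text)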
| 607
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowercase = "\nHuman: <<task>>\n\nAssistant: "
DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __UpperCAmelCase ( prompt_or_repo_id , agent_name , mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('\\s' , prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name})
    with open(prompt_file , 'r' , encoding='utf-8') as f:
        return f.read()
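# ---------------------------------------------------------------------------
# Usage sketch (illustrative). With the constants above, passing `None` fetches the "run"
# prompt template from the "huggingface-tools/default-prompts" dataset repo (network
# required), while any string containing whitespace is returned verbatim as the prompt.
def _example_get_run_prompt():
    # returned as-is because it contains whitespace
    inline = __UpperCAmelCase("Answer the question: <<task>>", "my-agent")
    # downloads run_prompt_template.txt from the default prompts repo
    template = __UpperCAmelCase(None, "my-agent", mode="run")
    return inline, template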
| 607
| 1
|
"""simple docstring"""
def compute_ap( l ):  # noqa: E741
    '''simple docstring'''
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
SCREAMING_SNAKE_CASE__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
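# ---------------------------------------------------------------------------
# Brute-force cross-check (illustrative): a vertex is an articulation point exactly when
# removing it increases the number of connected components. For the adjacency list above
# this should report [2, 3, 5], matching what `compute_ap` prints.
def _articulation_points_brute_force(graph):
    def count_components(excluded):
        seen = set()
        count = 0
        for start in graph:
            if start == excluded or start in seen:
                continue
            count += 1
            stack = [start]
            while stack:  # flood-fill one component
                node = stack.pop()
                if node in seen or node == excluded:
                    continue
                seen.add(node)
                stack.extend(n for n in graph[node] if n != excluded and n not in seen)
        return count
    base = count_components(None)
    return [v for v in graph if count_components(v) > base]
print(_articulation_points_brute_force(data))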
| 532
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowercase ( PretrainedConfig ):
_SCREAMING_SNAKE_CASE = 'visual_bert'
    def __init__( self , vocab_size=30_522 , hidden_size=768 , visual_embedding_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , bypass_transformer=False , special_visual_initialize=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
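# ---------------------------------------------------------------------------
# Usage sketch (illustrative): instantiating the config (released transformers exposes it
# as `VisualBertConfig`) with a couple of overridden hyperparameters and round-tripping it
# through its dict form; the values and the helper name are arbitrary.
def _example_visual_bert_config():
    config = lowercase(visual_embedding_dim=1_024, num_hidden_layers=6)
    assert config.visual_embedding_dim == 1_024 and config.num_hidden_layers == 6
    return config.to_dict()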
| 532
| 1
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = TypeVar('DatasetType', Dataset, IterableDataset)
def __UpperCAmelCase( lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(lowercase_ ):
if not isinstance(lowercase_ , (Dataset, IterableDataset) ):
if isinstance(lowercase_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'''is an empty dataset dictionary.''' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(lowercase_ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowercase_ ) )}\']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase_ ).__name__}.""" )
if i == 0:
_lowerCamelCase, _lowerCamelCase : Dict = (
(Dataset, IterableDataset) if isinstance(lowercase_ , lowercase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase_ , lowercase_ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowercase_ , lowercase_ , lowercase_ , info=lowercase_ , split=lowercase_ , stopping_strategy=lowercase_ )
else:
return _interleave_iterable_datasets(
lowercase_ , lowercase_ , lowercase_ , info=lowercase_ , split=lowercase_ , stopping_strategy=lowercase_ )
def __UpperCAmelCase( lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(lowercase_ ):
if not isinstance(lowercase_ , (Dataset, IterableDataset) ):
if isinstance(lowercase_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'''is an empty dataset dictionary.''' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(lowercase_ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowercase_ ) )}\']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase_ ).__name__}.""" )
if i == 0:
_lowerCamelCase, _lowerCamelCase : Dict = (
(Dataset, IterableDataset) if isinstance(lowercase_ , lowercase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase_ , lowercase_ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowercase_ , info=lowercase_ , split=lowercase_ , axis=lowercase_ )
else:
return _concatenate_iterable_datasets(lowercase_ , info=lowercase_ , split=lowercase_ , axis=lowercase_ )
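# ---------------------------------------------------------------------------
# Usage sketch (illustrative): the two functions above appear to back the public
# `datasets.interleave_datasets` and `datasets.concatenate_datasets` APIs; below is a
# minimal in-memory example of those public entry points (names assumed from the
# released library, `_example_combine_datasets` is a made-up helper).
def _example_combine_datasets():
    from datasets import Dataset, concatenate_datasets, interleave_datasets

    d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
    d2 = Dataset.from_dict({"text": ["x", "y"]})
    mixed = interleave_datasets(
        [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
    )
    joined = concatenate_datasets([d1, d2])
    return mixed["text"], joined["text"]  # joined["text"] == ["a", "b", "c", "x", "y"]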
| 717
|
import warnings
from .generation import TFGenerationMixin
class __A ( TFGenerationMixin ):
    """simple docstring"""
    # emit the deprecation warning when this legacy module is imported
    warnings.warn(
        """Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
        """be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , FutureWarning , )
| 613
| 0
|
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_pointa : Pointad , end_pointb : Pointad ):
    '''simple docstring'''
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab : Vectorad , ac : Vectorad ):
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector ( vector : Vectorad , accuracy : int ):
    '''simple docstring'''
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def __lowerCamelCase ( point_a : Pointad , point_b : Pointad , point_c : Pointad , accuracy : int = 10 ):
    '''simple docstring'''
    ab = create_vector(point_a , point_b )
    ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
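# ---------------------------------------------------------------------------
# Quick check (illustrative): three points on the line x = y = z span parallel vectors, so
# their cross product is the zero vector; a point off that line is not collinear with them.
def _example_collinearity():
    ab = create_vector((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))
    ac = create_vector((0.0, 0.0, 0.0), (2.0, 2.0, 2.0))
    assert is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy=10)
    ad = create_vector((0.0, 0.0, 0.0), (1.0, 2.0, 3.0))
    assert not is_zero_vector(get_ad_vectors_cross(ab, ad), accuracy=10)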
| 457
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Any = data
snake_case__ : Node | None = None
class snake_case :
"""simple docstring"""
def __init__( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Union[str, Any] = None
snake_case__ : int = None
def __iter__( self ) -> Iterator[Any]:
"""simple docstring"""
snake_case__ : Dict = self.head
while self.head:
yield node.data
snake_case__ : str = node.next
if node == self.head:
break
def __len__( self ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ) -> Optional[int]:
"""simple docstring"""
return "->".join(str(lowerCamelCase ) for item in iter(self ) )
def lowercase__ ( self , lowerCamelCase ) -> None:
"""simple docstring"""
self.insert_nth(len(self ) , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> None:
"""simple docstring"""
self.insert_nth(0 , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> None:
"""simple docstring"""
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
snake_case__ : Optional[int] = Node(lowerCamelCase )
if self.head is None:
snake_case__ : Tuple = new_node # first node points itself
snake_case__ : Union[str, Any] = new_node
elif index == 0: # insert at head
snake_case__ : Any = self.head
snake_case__ : Any = new_node
else:
snake_case__ : Optional[Any] = self.head
for _ in range(index - 1 ):
snake_case__ : List[Any] = temp.next
snake_case__ : Dict = temp.next
snake_case__ : str = new_node
if index == len(self ) - 1: # insert at tail
snake_case__ : Optional[Any] = new_node
def lowercase__ ( self ) -> int:
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , lowerCamelCase = 0 ) -> Any:
"""simple docstring"""
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
snake_case__ : Union[str, Any] = self.head
if self.head == self.tail: # just one node
snake_case__ : int = None
elif index == 0: # delete head node
snake_case__ : Dict = self.tail.next.next
snake_case__ : int = self.head.next
else:
snake_case__ : Dict = self.head
for _ in range(index - 1 ):
snake_case__ : Any = temp.next
snake_case__ : Dict = temp.next
snake_case__ : str = temp.next.next
if index == len(self ) - 1: # delete at tail
snake_case__ : List[Any] = temp
return delete_node.data
def lowercase__ ( self ) -> bool:
"""simple docstring"""
return len(self ) == 0
def _A ( ):
snake_case__ : int = CircularLinkedList()
assert len(snake_case__ ) == 0
assert circular_linked_list.is_empty() is True
assert str(snake_case__ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(snake_case__ ) == i
circular_linked_list.insert_nth(snake_case__ , i + 1 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 261
| 0
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : List[str]=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : Any=5 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Dict=64 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : int=1 , ):
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Tuple = seq_length
SCREAMING_SNAKE_CASE : str = is_training
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE : str = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : List[Any] = q_groups
SCREAMING_SNAKE_CASE : Any = k_groups
SCREAMING_SNAKE_CASE : Tuple = v_groups
SCREAMING_SNAKE_CASE : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_groups
SCREAMING_SNAKE_CASE : str = output_groups
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : int = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A ( self : List[Any] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _A ( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : List[Any] = SqueezeBertModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Dict = SqueezeBertForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = SqueezeBertForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : int = SqueezeBertForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = SqueezeBertForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_choices
SCREAMING_SNAKE_CASE : Tuple = SqueezeBertForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[str] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ : List[Any] = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Optional[Any] = True
UpperCamelCase_ : Optional[Any] = False
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : List[str] = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=UpperCAmelCase_ , dim=37 )
def _A ( self : str ):
self.config_tester.run_common_tests()
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*UpperCAmelCase_ )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCAmelCase_ )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCAmelCase_ )
def _A ( self : str ):
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCAmelCase_ )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCAmelCase_ )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCAmelCase_ )
@slow
def _A ( self : Dict ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Union[str, Any] = SqueezeBertModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _A ( self : str ):
SCREAMING_SNAKE_CASE : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : str = torch.Size((1, 3) )
self.assertEqual(output.shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-4 ) )
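# A minimal inference sketch against the same squeezebert/squeezebert-mnli checkpoint
# exercised by the integration test above. The premise/hypothesis strings and the use
# of the Auto* classes are illustrative assumptions, not part of the test itself.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

squeezebert_tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
squeezebert_model = AutoModelForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

nli_inputs = squeezebert_tokenizer(
    "A soccer game with multiple males playing.",
    "Some men are playing a sport.",
    return_tensors="pt",
)
with torch.no_grad():
    nli_logits = squeezebert_model(**nli_inputs).logits
print(nli_logits.argmax(dim=-1))  # predicted MNLI class index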
def different_signs(num_a, num_b):
    """Return True if the two integers have opposite signs (XOR sign-bit trick)."""
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
def solution(n: int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
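# Hand-checked example for solution() above (not part of the original problem statement):
# for n = 5 the bases and exponents both range over 2..5, giving 16 powers of which
# 2**4 and 4**2 coincide, so 15 distinct terms remain.
assert solution(5) == 15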
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCamelCase_ = {'''facebook/blenderbot_small-90M''': 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word represented as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]="__start__" , lowerCAmelCase_ : str="__end__" , lowerCAmelCase_ : Union[str, Any]="__unk__" , lowerCAmelCase_ : Union[str, Any]="__null__" , **lowerCAmelCase_ : List[Any] , ) -> str:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ : str = json.load(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="utf-8" ) as merges_handle:
UpperCAmelCase_ : Optional[Any] = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase_ : str = [tuple(merge.split() ) for merge in merges]
UpperCAmelCase_ : Any = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
UpperCAmelCase_ : Any = {}
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
return len(self.encoder )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : str ) -> str:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Dict = re.sub("([.,!?()])" , R" \1" , lowerCAmelCase_ )
UpperCAmelCase_ : int = re.sub("(')" , R" \1 " , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = re.sub(R"\s{2,}" , " " , lowerCAmelCase_ )
if "\n" in token:
UpperCAmelCase_ : Tuple = token.replace("\n" , " __newln__" )
UpperCAmelCase_ : Tuple = token.split(" " )
UpperCAmelCase_ : int = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
UpperCAmelCase_ : Any = token.lower()
UpperCAmelCase_ : List[str] = tuple(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
UpperCAmelCase_ : List[Any] = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
UpperCAmelCase_ : Union[str, Any] = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Dict = bigram
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Dict = 0
while i < len(lowerCAmelCase_ ):
try:
UpperCAmelCase_ : int = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
UpperCAmelCase_ : Any = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : Dict = tuple(lowerCAmelCase_ )
UpperCAmelCase_ : Any = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = "@@ ".join(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = word[:-4]
UpperCAmelCase_ : Optional[Any] = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : List[Any] = re.findall(R"\S+\n?" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(" " ) ) )
return split_tokens
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : str ) -> int:
UpperCAmelCase_ : List[Any] = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int ) -> str:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str] ) -> str:
UpperCAmelCase_ : List[str] = " ".join(lowerCAmelCase_ ).replace("@@ " , "" ).strip()
return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
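# A short usage sketch for the tokenizer above. The checkpoint name is taken from the
# PRETRAINED_VOCAB_FILES_MAP in this file; loading it through the released
# BlenderbotSmallTokenizer class (rather than the renamed class defined here) is an
# assumption made for illustration.
from transformers import BlenderbotSmallTokenizer

blenderbot_tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
sample_tokens = blenderbot_tokenizer.tokenize("Sam is a great name. It means 'listener'.")
print(sample_tokens)  # lowercased BPE pieces, continuation pieces marked with "@@"
print(blenderbot_tokenizer.convert_tokens_to_string(sample_tokens))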
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
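# A minimal wiring sketch for the processor class above (shown here under its
# placeholder name `_lowerCAmelCase`). The tokenizer checkpoint, the default
# CLIPImageProcessor, the dummy image, and the keyword signature used below are
# illustrative assumptions about the intended (image_processor, tokenizer) API,
# not something this snippet guarantees as written.
from PIL import Image
from transformers import CLIPImageProcessor, XLMRobertaTokenizerFast

example_image_processor = CLIPImageProcessor()
example_tokenizer = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")  # hypothetical choice
example_processor = _lowerCAmelCase(image_processor=example_image_processor, tokenizer=example_tokenizer)

example_batch = example_processor(
    text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt"
)
print(sorted(example_batch.keys()))  # expect input_ids/attention_mask plus pixel_values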
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None


def display(tree):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree):
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main():  # Main function for testing.
    # The exact attachment points of the nine nodes are not recoverable from this
    # snippet, so the tree below is an illustrative reconstruction.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
if __name__ == "__main__":
main()
"""simple docstring"""
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
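# A brief, illustrative use of a few of the feature types listed above, via the
# public datasets API; the column names and label set are made up for the example.
from datasets import ClassLabel, Features, Sequence, Value

example_features = Features(
    {
        "text": Value("string"),
        "tokens": Sequence(Value("string")),
        "label": ClassLabel(names=["negative", "positive"]),
    }
)
print(example_features["label"].str2int("positive"))  # -> 1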
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    '''Load a TensorFlow BERT checkpoint into a PyTorch BertForPreTraining model and save it.'''
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
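# Programmatic equivalent of the CLI entry point above; every path below is a
# hypothetical placeholder, not a real checkpoint shipped with this script.
if False:  # flip to True after filling in real paths
    convert_tf_checkpoint_to_pytorch(
        "/path/to/bert_model.ckpt",    # --tf_checkpoint_path
        "/path/to/bert_config.json",   # --bert_config_file
        "/path/to/pytorch_model.bin",  # --pytorch_dump_path
    )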
'''simple docstring'''
def _print_dist(dist, v):
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()


def floyd_warshall(graph, v):
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
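# A non-interactive sketch of the same run as the commented example session above:
# vertices 0..2, an edge 1 -> 2 with weight 2 and an edge 2 -> 1 with weight 1.
INF = float("inf")
example_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
example_dist, _ = floyd_warshall(example_graph, 3)
# _print_dist inside floyd_warshall prints the expected matrix:
# 0 INF INF / INF 0 2 / INF 1 0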
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
UpperCamelCase_ = "sshleifer/mar_enro_6_3_student"
class _SCREAMING_SNAKE_CASE ( snake_case__ ):
def _UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
super().setUp()
A : int = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=__lowercase , )
A : Dict = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
MarianMTModel.from_pretrained(__lowercase )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
A : Tuple = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
A : str = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
A : Union[str, Any] = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
A : Tuple = bash_script.replace(__lowercase , str(__lowercase ) )
A : Union[str, Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
A : List[Any] = f"""\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n """.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
A : str = ['''finetune.py'''] + bash_script.split() + args
with patch.object(__lowercase , '''argv''' , __lowercase ):
A : str = argparse.ArgumentParser()
A : Dict = pl.Trainer.add_argparse_args(__lowercase )
A : List[Any] = SummarizationModule.add_model_specific_args(__lowercase , os.getcwd() )
A : int = parser.parse_args()
A : Tuple = main(__lowercase )
# Check metrics
A : Union[str, Any] = load_json(model.metrics_save_path )
A : Any = metrics['''val'''][0]
A : int = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , __lowercase )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
A : Any = os.listdir(__lowercase )
A : Tuple = [x for x in contents if x.endswith('''.ckpt''' )][0]
A : int = os.path.join(args.output_dir , __lowercase )
A : List[str] = torch.load(__lowercase , map_location='''cpu''' )
A : str = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
A : str = {os.path.basename(__lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class _SCREAMING_SNAKE_CASE ( snake_case__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self : str ):
"""simple docstring"""
A : int = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
A : List[Any] = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
A : Any = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
A : Optional[Any] = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
A : Union[str, Any] = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
A : str = bash_script.replace(__lowercase , str(__lowercase ) )
A : Any = self.get_auto_remove_tmp_dir()
A : Optional[int] = bash_script.replace('''--fp16''' , '''''' )
A : Tuple = 6
A : str = (
['''distillation.py''']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"""--num_train_epochs={epochs}""",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(__lowercase , '''argv''' , __lowercase ):
A : int = argparse.ArgumentParser()
A : int = pl.Trainer.add_argparse_args(__lowercase )
A : Optional[int] = SummarizationDistiller.add_model_specific_args(__lowercase , os.getcwd() )
A : Union[str, Any] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
A : Union[str, Any] = distill_main(__lowercase )
# Check metrics
A : Dict = load_json(model.metrics_save_path )
A : List[str] = metrics['''val'''][0]
A : Tuple = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , __lowercase )
# check lightning ckpt can be loaded and has a reasonable statedict
A : int = os.listdir(__lowercase )
A : str = [x for x in contents if x.endswith('''.ckpt''' )][0]
A : Union[str, Any] = os.path.join(args.output_dir , __lowercase )
A : Optional[int] = torch.load(__lowercase , map_location='''cpu''' )
A : int = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
A : int = {os.path.basename(__lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    '''Calculate the entropy of the softmax distribution defined by a batch of logits.'''
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
"""simple docstring"""
def __init__( self :List[str] , __lowercase :int ):
super().__init__()
__lowerCamelCase : str =config.output_attentions
__lowerCamelCase : List[Any] =config.output_hidden_states
__lowerCamelCase : Dict =nn.ModuleList([BertLayer(__lowercase ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase : str =nn.ModuleList([BertHighway(__lowercase ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase : Optional[Any] =[-1 for _ in range(config.num_hidden_layers )]
def __lowercase ( self :Union[str, Any] , __lowercase :Union[str, Any] ):
if (type(__lowercase ) is float) or (type(__lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__lowerCamelCase : Tuple =x
else:
__lowerCamelCase : Any =x
def __lowercase ( self :Union[str, Any] , __lowercase :Tuple ):
__lowerCamelCase : Union[str, Any] =pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __lowercase ( self :Tuple , __lowercase :Optional[int] , __lowercase :Dict=None , __lowercase :Union[str, Any]=None , __lowercase :List[str]=None , __lowercase :str=None , ):
__lowerCamelCase : Any =()
__lowerCamelCase : List[str] =()
__lowerCamelCase : Optional[int] =()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__lowerCamelCase : int =all_hidden_states + (hidden_states,)
__lowerCamelCase : List[Any] =layer_module(
__lowercase , __lowercase , head_mask[i] , __lowercase , __lowercase )
__lowerCamelCase : Optional[int] =layer_outputs[0]
if self.output_attentions:
__lowerCamelCase : Optional[Any] =all_attentions + (layer_outputs[1],)
__lowerCamelCase : Any =(hidden_states,)
if self.output_hidden_states:
__lowerCamelCase : Optional[Any] =current_outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase : Dict =current_outputs + (all_attentions,)
__lowerCamelCase : str =self.highway[i](__lowercase )
# logits, pooled_output
if not self.training:
__lowerCamelCase : Tuple =highway_exit[0]
__lowerCamelCase : Tuple =entropy(__lowercase )
__lowerCamelCase : Tuple =highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__lowerCamelCase : Optional[int] =all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__lowerCamelCase : Dict =(highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__lowercase , i + 1 )
else:
__lowerCamelCase : Union[str, Any] =all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__lowerCamelCase : Optional[Any] =all_hidden_states + (hidden_states,)
__lowerCamelCase : List[Any] =(hidden_states,)
if self.output_hidden_states:
__lowerCamelCase : Tuple =outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase : Optional[int] =outputs + (all_attentions,)
__lowerCamelCase : int =outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , snake_case__ , )
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :str ):
super().__init__(__lowercase )
__lowerCamelCase : Union[str, Any] =config
__lowerCamelCase : List[str] =BertEmbeddings(__lowercase )
__lowerCamelCase : Dict =DeeBertEncoder(__lowercase )
__lowerCamelCase : List[Any] =BertPooler(__lowercase )
self.init_weights()
def __lowercase ( self :Tuple ):
self.encoder.init_highway_pooler(self.pooler )
def __lowercase ( self :Dict ):
return self.embeddings.word_embeddings
def __lowercase ( self :List[str] , __lowercase :int ):
__lowerCamelCase : Union[str, Any] =value
def __lowercase ( self :List[Any] , __lowercase :Dict ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__lowercase )
@add_start_docstrings_to_model_forward(__lowercase )
def __lowercase ( self :Optional[Any] , __lowercase :List[str]=None , __lowercase :List[Any]=None , __lowercase :Any=None , __lowercase :Tuple=None , __lowercase :Union[str, Any]=None , __lowercase :Optional[Any]=None , __lowercase :Union[str, Any]=None , __lowercase :Tuple=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
__lowerCamelCase : List[str] =input_ids.size()
elif inputs_embeds is not None:
__lowerCamelCase : str =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
__lowerCamelCase : Optional[int] =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCamelCase : str =torch.ones(__lowercase , device=__lowercase )
if encoder_attention_mask is None:
__lowerCamelCase : Tuple =torch.ones(__lowercase , device=__lowercase )
if token_type_ids is None:
__lowerCamelCase : List[Any] =torch.zeros(__lowercase , dtype=torch.long , device=__lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCamelCase : torch.Tensor =self.get_extended_attention_mask(__lowercase , __lowercase , __lowercase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__lowerCamelCase : List[str] =encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__lowerCamelCase : Any =encoder_attention_mask[:, None, None, :]
__lowerCamelCase : Optional[Any] =encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__lowerCamelCase : List[str] =(1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCamelCase : Union[str, Any] =self.get_head_mask(__lowercase , self.config.num_hidden_layers )
__lowerCamelCase : str =self.embeddings(
input_ids=__lowercase , position_ids=__lowercase , token_type_ids=__lowercase , inputs_embeds=__lowercase )
__lowerCamelCase : Dict =self.encoder(
__lowercase , attention_mask=__lowercase , head_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__lowerCamelCase : int =encoder_outputs[0]
__lowerCamelCase : Tuple =self.pooler(__lowercase )
__lowerCamelCase : int =(
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :List[Any] , __lowercase :Optional[Any] , __lowercase :Dict ):
__lowerCamelCase : List[Any] =message
__lowerCamelCase : int =exit_layer # start from 1!
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
"""simple docstring"""
def __init__( self :Any , __lowercase :str ):
super().__init__()
__lowerCamelCase : str =BertPooler(__lowercase )
__lowerCamelCase : Union[str, Any] =nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase : List[str] =nn.Linear(config.hidden_size , config.num_labels )
def __lowercase ( self :Union[str, Any] , __lowercase :List[str] ):
# Pooler
__lowerCamelCase : Optional[Any] =encoder_outputs[0]
__lowerCamelCase : Any =self.pooler(__lowercase )
# "return" pooler_output
# BertModel
__lowerCamelCase : List[str] =(pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__lowerCamelCase : List[Any] =bmodel_output[1]
__lowerCamelCase : Optional[Any] =self.dropout(__lowercase )
__lowerCamelCase : int =self.classifier(__lowercase )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , snake_case__ , )
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :Dict ):
super().__init__(__lowercase )
__lowerCamelCase : Any =config.num_labels
__lowerCamelCase : int =config.num_hidden_layers
__lowerCamelCase : Tuple =DeeBertModel(__lowercase )
__lowerCamelCase : Optional[int] =nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase : Optional[int] =nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(__lowercase )
def __lowercase ( self :List[str] , __lowercase :List[str]=None , __lowercase :str=None , __lowercase :Optional[Any]=None , __lowercase :List[Any]=None , __lowercase :Union[str, Any]=None , __lowercase :Dict=None , __lowercase :int=None , __lowercase :int=-1 , __lowercase :List[str]=False , ):
__lowerCamelCase : Union[str, Any] =self.num_layers
try:
__lowerCamelCase : Union[str, Any] =self.bert(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__lowerCamelCase : List[Any] =outputs[1]
__lowerCamelCase : Optional[Any] =self.dropout(__lowercase )
__lowerCamelCase : Tuple =self.classifier(__lowercase )
__lowerCamelCase : int =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCamelCase : Union[str, Any] =e.message
__lowerCamelCase : Optional[Any] =e.exit_layer
__lowerCamelCase : Any =outputs[0]
if not self.training:
__lowerCamelCase : List[Any] =entropy(__lowercase )
__lowerCamelCase : Union[str, Any] =[]
__lowerCamelCase : int =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase : Union[str, Any] =MSELoss()
__lowerCamelCase : List[Any] =loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase : Dict =CrossEntropyLoss()
__lowerCamelCase : List[Any] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__lowerCamelCase : str =[]
for highway_exit in outputs[-1]:
__lowerCamelCase : List[str] =highway_exit[0]
if not self.training:
highway_logits_all.append(__lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase : Optional[int] =MSELoss()
__lowerCamelCase : Optional[Any] =loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase : int =CrossEntropyLoss()
__lowerCamelCase : int =loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__lowercase )
if train_highway:
__lowerCamelCase : Dict =(sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowerCamelCase : List[str] =(loss,) + outputs
if not self.training:
__lowerCamelCase : List[Any] =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCamelCase : Dict =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
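# Standalone check of the entropy() helper defined at the top of this file; the
# logit values are made-up examples, chosen only to illustrate the early-exit signal.
import torch

uniform_logits = torch.zeros(1, 3)                     # maximal uncertainty -> entropy ~= ln(3) ~= 1.0986
peaked_logits = torch.tensor([[10.0, -10.0, -10.0]])   # near one-hot -> entropy ~= 0
print(entropy(uniform_logits), entropy(peaked_logits))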
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ = None , a_ = None , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = path_or_paths
__snake_case : str = split if split or isinstance(a_ , a_ ) else '''train'''
__snake_case : int = features
__snake_case : List[Any] = cache_dir
__snake_case : Optional[Any] = keep_in_memory
__snake_case : Union[str, Any] = streaming
__snake_case : int = num_proc
__snake_case : Tuple = kwargs
@abstractmethod
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , **a_ , ):
'''simple docstring'''
__snake_case : str = features
__snake_case : str = cache_dir
__snake_case : List[Any] = keep_in_memory
__snake_case : Union[str, Any] = streaming
__snake_case : int = num_proc
__snake_case : Dict = kwargs
@abstractmethod
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
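# Small self-contained check of the repaired helpers above. The ListNode class and
# the build() helper are assumptions added for the example; the snippet itself
# defines no node type.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build(values):
    head = tail = None
    for value in values:
        node = ListNode(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


print(is_palindrome_stack(build([1, 2, 2, 1])))  # True
print(is_palindrome_dict(build([1, 2, 3])))      # False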
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = os.path.abspath(A__ )
logger.info(F"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
__lowercase = tf.train.list_variables(A__ )
__lowercase = []
__lowercase = []
__lowercase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__lowercase = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(F"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
__lowercase = name[1:]
# figure out how many levels deep the name is
__lowercase = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(A__ )
# read data
__lowercase = tf.train.load_variable(A__ , A__ )
names.append('''/'''.join(A__ ) )
arrays.append(A__ )
logger.info(F"Read a total of {len(A__ ):,} layers" )
# Sanity check
if len(set(A__ ) ) != 1:
raise ValueError(F"Found layer names with different depths (layer depth {list(set(A__ ) )})" )
__lowercase = list(set(A__ ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(A__ , A__ ):
__lowercase = full_name.split('''/''' )
__lowercase = model
__lowercase = []
for i, m_name in enumerate(A__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
__lowercase = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
__lowercase = getattr(A__ , '''embeddings''' )
__lowercase = getattr(A__ , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
__lowercase = getattr(A__ , '''encoder''' )
__lowercase = getattr(A__ , '''layer''' )
__lowercase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
__lowercase = getattr(A__ , '''pooler''' )
__lowercase = getattr(A__ , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
__lowercase = getattr(A__ , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
__lowercase = getattr(A__ , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
__lowercase = getattr(A__ , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
__lowercase = getattr(A__ , '''token_type_embeddings''' )
else:
raise ValueError(F"Unknown embedding layer with name {full_name}" )
trace.append('''weight''' )
__lowercase = getattr(A__ , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
__lowercase = getattr(A__ , '''attention''' )
__lowercase = getattr(A__ , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
__lowercase = getattr(A__ , '''attention''' )
__lowercase = getattr(A__ , '''output''' )
__lowercase = getattr(A__ , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
__lowercase = getattr(A__ , '''attention''' )
__lowercase = getattr(A__ , '''output''' )
__lowercase = getattr(A__ , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
__lowercase = getattr(A__ , '''output''' )
__lowercase = getattr(A__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
__lowercase = getattr(A__ , '''output''' )
__lowercase = getattr(A__ , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
__lowercase = getattr(A__ , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
__lowercase = getattr(A__ , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
__lowercase = getattr(A__ , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
__lowercase = getattr(A__ , '''intermediate''' )
__lowercase = getattr(A__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
__lowercase = getattr(A__ , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
__lowercase = getattr(A__ , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
__lowercase = getattr(A__ , '''weight''' )
else:
logger.warning(F"Ignored {m_name}" )
# for certain layers reshape is necessary
__lowercase = '''.'''.join(A__ )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , A__ ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , A__ ):
__lowercase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__lowercase = array.transpose()
if pointer.shape == array.shape:
__lowercase = torch.from_numpy(A__ )
else:
raise ValueError(
F"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
F" {array.shape}" )
logger.info(F"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
logger.info(F"Loading model based on config from {config_path}..." )
__lowercase = BertConfig.from_json_file(A__ )
__lowercase = BertModel(A__ )
# Load weights from checkpoint
logger.info(F"Loading weights from checkpoint {tf_checkpoint_path}..." )
load_tfa_weights_in_bert(A__ , A__ , A__ )
# Save pytorch-model
logger.info(F"Saving PyTorch model to {pytorch_dump_path}..." )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
lowerCAmelCase__ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Return all contiguous character n-grams of length ngram_size in the sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
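# Hand-checked example of the n-gram helper above (the sentence is my own):
# "I am" has length 4, so the character trigrams are "I a" and " am".
print(create_ngram("I am", 3))  # ['I a', ' am']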
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a_ : Optional[List[str]] = None
a_ : Optional[int] = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a_ : Dict = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class _snake_case :
_lowercase : bool = True
_lowercase : Optional[str] = None
# Automatically constructed
_lowercase : ClassVar[str] = "PIL.Image.Image"
_lowercase : ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
_lowercase : str = field(default='''Image''' , init=A__ , repr=A__ )
def __call__( self) -> List[Any]:
return self.pa_type
def SCREAMING_SNAKE_CASE__ ( self , a) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
if isinstance(a , a):
SCREAMING_SNAKE_CASE = np.array(a)
if isinstance(a , a):
return {"path": value, "bytes": None}
elif isinstance(a , a):
return {"path": None, "bytes": value}
elif isinstance(a , np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a)
elif isinstance(a , PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a)
elif value.get('path') is not None and os.path.isfile(value['path']):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path')}
elif value.get('bytes') is not None or value.get('path') is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes'), "path": value.get('path')}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''')
def SCREAMING_SNAKE_CASE__ ( self , a , a=None) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.')
if token_per_repo_id is None:
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''')
else:
if is_local_path(a):
SCREAMING_SNAKE_CASE = PIL.Image.open(a)
else:
SCREAMING_SNAKE_CASE = path.split('::')[-1]
try:
SCREAMING_SNAKE_CASE = string_to_dict(a , config.HUB_DATASETS_URL)['repo_id']
SCREAMING_SNAKE_CASE = token_per_repo_id.get(a)
except ValueError:
SCREAMING_SNAKE_CASE = None
with xopen(a , 'rb' , use_auth_token=a) as f:
SCREAMING_SNAKE_CASE = BytesIO(f.read())
SCREAMING_SNAKE_CASE = PIL.Image.open(bytes_)
else:
SCREAMING_SNAKE_CASE = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
def SCREAMING_SNAKE_CASE__ ( self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary'),
"path": Value('string'),
}
)
def SCREAMING_SNAKE_CASE__ ( self , a) -> pa.StructArray:
if pa.types.is_string(storage.type):
SCREAMING_SNAKE_CASE = pa.array([None] * len(a) , type=pa.binary())
SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
SCREAMING_SNAKE_CASE = pa.array([None] * len(a) , type=pa.string())
SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('bytes') >= 0:
SCREAMING_SNAKE_CASE = storage.field('bytes')
else:
SCREAMING_SNAKE_CASE = pa.array([None] * len(a) , type=pa.binary())
if storage.type.get_field_index('path') >= 0:
SCREAMING_SNAKE_CASE = storage.field('path')
else:
SCREAMING_SNAKE_CASE = pa.array([None] * len(a) , type=pa.string())
SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_list(storage.type):
SCREAMING_SNAKE_CASE = pa.array(
[encode_np_array(np.array(a))['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
SCREAMING_SNAKE_CASE = pa.array([None] * len(a) , type=pa.string())
SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
return array_cast(a , self.pa_type)
def SCREAMING_SNAKE_CASE__ ( self , a) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(a):
with xopen(a , 'rb') as f:
SCREAMING_SNAKE_CASE = f.read()
return bytes_
SCREAMING_SNAKE_CASE = pa.array(
[
(path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE = pa.array(
[os.path.basename(a) if path is not None else None for path in storage.field('path').to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
return array_cast(a , self.pa_type)
def lowerCamelCase__ ():
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
SCREAMING_SNAKE_CASE = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = BytesIO()
if image.format in list_image_compression_formats():
SCREAMING_SNAKE_CASE = image.format
else:
SCREAMING_SNAKE_CASE = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(_UpperCAmelCase , format=_UpperCAmelCase)
return buffer.getvalue()
def lowerCamelCase__ (_UpperCAmelCase):
if hasattr(_UpperCAmelCase , 'filename') and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase)}
def lowerCamelCase__ (_UpperCAmelCase):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
SCREAMING_SNAKE_CASE = array.dtype
SCREAMING_SNAKE_CASE = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
SCREAMING_SNAKE_CASE = dtype.kind
SCREAMING_SNAKE_CASE = dtype.itemsize
SCREAMING_SNAKE_CASE = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
SCREAMING_SNAKE_CASE = np.dtype('|u1')
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''')
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
SCREAMING_SNAKE_CASE = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
SCREAMING_SNAKE_CASE = dtype_byteorder + dtype_kind + str(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = np.dtype(_UpperCAmelCase)
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''')
SCREAMING_SNAKE_CASE = PIL.Image.fromarray(array.astype(_UpperCAmelCase))
return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase)}
def lowerCamelCase__ (_UpperCAmelCase):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
if objs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = first_non_null_value(_UpperCAmelCase)
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCAmelCase , np.ndarray):
SCREAMING_SNAKE_CASE = no_op_if_value_is_null(_UpperCAmelCase)
return [obj_to_image_dict_func(_UpperCAmelCase) for obj in objs]
elif isinstance(_UpperCAmelCase , PIL.Image.Image):
SCREAMING_SNAKE_CASE = no_op_if_value_is_null(_UpperCAmelCase)
return [obj_to_image_dict_func(_UpperCAmelCase) for obj in objs]
else:
return objs
else:
return objs
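# --- Illustrative usage (not part of the original file) ---
# A minimal, self-contained sketch of the round trip the helpers above perform:
# a NumPy array is wrapped in a PIL image, serialized to PNG bytes, and decoded
# back. It only assumes that Pillow and NumPy are installed.
import io

import numpy as np
import PIL.Image

array = np.zeros((4, 4, 3), dtype=np.uint8)              # tiny black RGB image
buffer = io.BytesIO()
PIL.Image.fromarray(array).save(buffer, format="PNG")    # encode to PNG bytes, mirroring the byte-encoding helper above
decoded = PIL.Image.open(io.BytesIO(buffer.getvalue()))  # decode, mirroring the Image feature's decode path above
decoded.load()
assert np.array_equal(np.array(decoded), array)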
| 700
|
import os
from collections.abc import Iterator
def lowerCamelCase__ (_UpperCAmelCase = "."):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase)[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase).lstrip('./')
def lowerCamelCase__ (_UpperCAmelCase):
return F'''{i * ' '}*''' if i else "\n##"
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = old_path.split(os.sep)
for i, new_part in enumerate(new_path.split(os.sep)):
if (i + 1 > len(_UpperCAmelCase) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(_UpperCAmelCase)} {new_part.replace('_' , ' ').title()}''')
return new_path
def lowerCamelCase__ (_UpperCAmelCase = "."):
SCREAMING_SNAKE_CASE = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase)):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = os.path.split(_UpperCAmelCase)
if filepath != old_path:
SCREAMING_SNAKE_CASE = print_path(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = (filepath.count(os.sep) + 1) if filepath else 0
SCREAMING_SNAKE_CASE = F'''{filepath}/{filename}'''.replace(' ' , '%20')
SCREAMING_SNAKE_CASE = os.path.splitext(filename.replace('_' , ' ').title())[0]
print(F'''{md_prefix(_UpperCAmelCase)} [{filename}]({url})''')
if __name__ == "__main__":
print_directory_md('.')
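# --- Illustrative output (not part of the original file) ---
# A rough standalone sketch of the markdown shape the script above emits:
# top-level directories become "##" headings, nested parts become indented "*"
# bullets, and files become links. The example paths below are made up.
def render_markdown(paths):
    lines, previous_dirs = [], []
    for path in sorted(paths):
        *dirs, filename = path.split("/")
        for depth, part in enumerate(dirs):
            if depth >= len(previous_dirs) or previous_dirs[depth] != part:
                prefix = f"{depth * '  '}*" if depth else "\n##"
                lines.append(f"{prefix} {part.replace('_', ' ').title()}")
        title = filename.rsplit(".", 1)[0].replace("_", " ").title()
        lines.append(f"{len(dirs) * '  '}* [{title}]({path})")
        previous_dirs = dirs
    return "\n".join(lines)

print(render_markdown(["sorts/bubble_sort.py", "sorts/quick_sort.py", "graphs/bfs.py"]))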
| 444
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : List[str] =TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
A__ : Tuple =tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
A__ : int =model(lowerCAmelCase_ )["""last_hidden_state"""]
A__ : Optional[Any] =tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , lowerCAmelCase_ )
# compare the actual values for a slice.
A__ : Optional[int] =tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 215
|
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int = 10, __snake_case : int = 22 ) -> int:
"""simple docstring"""
A__ : Any =range(1, __snake_case )
A__ : List[str] =range(1, __snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 215
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
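# --- Illustrative pattern (not part of the original file) ---
# A minimal PEP 562 sketch of the lazy-import idea behind the structure above.
# Instead of transformers' _LazyModule, a module-level __getattr__ defers the
# import until the attribute is first accessed; the mapping below uses the
# standard library purely for illustration.
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that actually provides it

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")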
| 481
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
'''simple docstring'''
A_ , A_ : int = coefficient_matrix.shape
A_ , A_ : Tuple = constant_matrix.shape
if rowsa != colsa:
A_ : int = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(_lowerCAmelCase )
if colsa != 1:
A_ : List[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(_lowerCAmelCase )
if rowsa != rowsa:
A_ : str = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(_lowerCAmelCase )
if len(_lowerCAmelCase ) != rowsa:
A_ : Any = (
"""Number of initial values must be equal to number of rows in coefficient """
f"""matrix but received {len(_lowerCAmelCase )} and {rowsa}"""
)
raise ValueError(_lowerCAmelCase )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
A_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) ,axis=1 )
A_ , A_ : str = table.shape
strictly_diagonally_dominant(_lowerCAmelCase )
# Iterate over the whole matrix the given number of times
for _ in range(_lowerCAmelCase ):
A_ : Union[str, Any] = []
for row in range(_lowerCAmelCase ):
A_ : str = 0
for col in range(_lowerCAmelCase ):
if col == row:
A_ : Optional[Any] = table[row][col]
elif col == cols - 1:
A_ : List[str] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A_ : str = (temp + val) / denom
new_val.append(_lowerCAmelCase )
A_ : List[str] = new_val
return [float(_lowerCAmelCase ) for i in new_val]
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
A_ , A_ : str = table.shape
A_ : Any = True
for i in range(0 ,_lowerCAmelCase ):
A_ : Optional[Any] = 0
for j in range(0 ,cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
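# --- Illustrative usage (not part of the original file) ---
# A minimal NumPy sketch of the same Jacobi update the function above applies,
#   x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii,
# on a strictly diagonally dominant 2x2 system:
import numpy as np

A = np.array([[4.0, 1.0], [2.0, 3.0]])
b = np.array([1.0, 2.0])
x = np.zeros(2)
for _ in range(25):
    x = (b - (A - np.diag(np.diag(A))) @ x) / np.diag(A)
print(x)  # converges towards the exact solution [0.1, 0.6]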
| 481
| 1
|
"""simple docstring"""
def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(_snake_case ) - 1
while left <= right:
# avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__SCREAMING_SNAKE_CASE = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(_snake_case ):
return None
__SCREAMING_SNAKE_CASE = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
__SCREAMING_SNAKE_CASE = left
__SCREAMING_SNAKE_CASE = point
elif point > right:
__SCREAMING_SNAKE_CASE = right
__SCREAMING_SNAKE_CASE = point
else:
if item < current_item:
__SCREAMING_SNAKE_CASE = point - 1
else:
__SCREAMING_SNAKE_CASE = point + 1
return None
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
# avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__SCREAMING_SNAKE_CASE = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(_snake_case ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(_snake_case , _snake_case , _snake_case , _snake_case )
elif point > right:
return interpolation_search_by_recursion(_snake_case , _snake_case , _snake_case , _snake_case )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
_snake_case , _snake_case , _snake_case , point - 1 )
else:
return interpolation_search_by_recursion(
_snake_case , _snake_case , point + 1 , _snake_case )
def _a ( UpperCAmelCase__ ) -> Any:
if collection != sorted(_snake_case ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
lowerCAmelCase__ =0
if debug == 1:
lowerCAmelCase__ =[10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
lowerCAmelCase__ =67
lowerCAmelCase__ =interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print("Not found")
| 482
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any, snake_case :Optional[int], snake_case :Optional[Any]=7, snake_case :str=3, snake_case :Optional[int]=18, snake_case :str=30, snake_case :List[Any]=400, snake_case :Any=True, snake_case :Dict=None, snake_case :Any=True, snake_case :Dict=None, snake_case :List[Any]=True, snake_case :Optional[int]=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], snake_case :Any=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], snake_case :int=True, ):
"""simple docstring"""
_lowercase =size if size is not None else {'height': 224, 'width': 224}
_lowercase =crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowercase =parent
_lowercase =batch_size
_lowercase =num_channels
_lowercase =image_size
_lowercase =min_resolution
_lowercase =max_resolution
_lowercase =do_resize
_lowercase =size
_lowercase =do_center_crop
_lowercase =crop_size
_lowercase =do_normalize
_lowercase =image_mean
_lowercase =image_std
_lowercase =do_convert_rgb
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCamelCase__ ( self :Dict, snake_case :List[Any]=False, snake_case :Any=False, snake_case :Union[str, Any]=False):
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_lowercase =[]
for i in range(self.batch_size):
image_inputs.append(
np.random.randint(
255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uinta))
else:
_lowercase =[]
for i in range(self.batch_size):
_lowercase , _lowercase =np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uinta))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_lowercase =[Image.fromarray(np.moveaxis(snake_case, 0, -1)) for x in image_inputs]
if torchify:
_lowercase =[torch.from_numpy(snake_case) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( _a , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : List[Any] =ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =ChineseCLIPImageProcessingTester(self, do_center_crop=snake_case)
@property
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(snake_case, 'do_resize'))
self.assertTrue(hasattr(snake_case, 'size'))
self.assertTrue(hasattr(snake_case, 'do_center_crop'))
self.assertTrue(hasattr(snake_case, 'center_crop'))
self.assertTrue(hasattr(snake_case, 'do_normalize'))
self.assertTrue(hasattr(snake_case, 'image_mean'))
self.assertTrue(hasattr(snake_case, 'image_std'))
self.assertTrue(hasattr(snake_case, 'do_convert_rgb'))
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {'height': 224, 'width': 224})
self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
_lowercase =self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {'shortest_edge': 42})
self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
pass
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowercase =self.image_processor_tester.prepare_inputs(equal_resolution=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case, Image.Image)
# Test not batched input
_lowercase =image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_lowercase =image_processing(snake_case, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowercase =self.image_processor_tester.prepare_inputs(equal_resolution=snake_case, numpify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case, np.ndarray)
# Test not batched input
_lowercase =image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_lowercase =image_processing(snake_case, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowercase =self.image_processor_tester.prepare_inputs(equal_resolution=snake_case, torchify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case, torch.Tensor)
# Test not batched input
_lowercase =image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_lowercase =image_processing(snake_case, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( _a , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] =ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=snake_case)
_lowercase =3
@property
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(snake_case, 'do_resize'))
self.assertTrue(hasattr(snake_case, 'size'))
self.assertTrue(hasattr(snake_case, 'do_center_crop'))
self.assertTrue(hasattr(snake_case, 'center_crop'))
self.assertTrue(hasattr(snake_case, 'do_normalize'))
self.assertTrue(hasattr(snake_case, 'image_mean'))
self.assertTrue(hasattr(snake_case, 'image_std'))
self.assertTrue(hasattr(snake_case, 'do_convert_rgb'))
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
pass
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowercase =self.image_processor_tester.prepare_inputs(equal_resolution=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case, Image.Image)
# Test not batched input
_lowercase =image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_lowercase =image_processing(snake_case, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
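# --- Illustrative preprocessing (not part of the original file) ---
# A rough standalone sketch of the resize -> center-crop -> scale pipeline the tests
# above exercise, using only Pillow and NumPy rather than the actual image processor;
# the mean/std values below are placeholders, not the processor's real defaults.
import numpy as np
import PIL.Image

image = PIL.Image.fromarray(np.random.randint(255, size=(32, 48, 3), dtype=np.uint8))
resized = image.resize((224, 224))                        # "size": {"height": 224, "width": 224}
left = (resized.width - 18) // 2
top = (resized.height - 18) // 2
cropped = resized.crop((left, top, left + 18, top + 18))  # "crop_size": {"height": 18, "width": 18}
pixels = (np.asarray(cropped, dtype=np.float32) / 255.0 - 0.5) / 0.5
print(pixels.shape)  # (18, 18, 3); the real processor also moves channels first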
| 181
| 0
|
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase__ : str = logging.getLogger()
def __A ( a_ : Tuple )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_snake_case , '''all_results.json''' )
if os.path.exists(_snake_case ):
with open(_snake_case , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.load(_snake_case )
else:
raise ValueError(F"can\'t find {path}" )
return results
lowerCamelCase__ : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Any ) -> Any:
'''simple docstring'''
import xla_spawn
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Optional[Any] = f"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Dict = time()
xla_spawn.main()
SCREAMING_SNAKE_CASE : str = time()
SCREAMING_SNAKE_CASE : Any = get_results(lowerCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
import xla_spawn
SCREAMING_SNAKE_CASE : Tuple = '''
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
'''.split()
with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_ ):
xla_spawn.main()
| 707
|
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
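# --- Illustrative usage (not part of the original file) ---
# The shortcut functions above simply forward *args/**kwargs to the corresponding
# Auto* classes, so they behave like the direct call below (which requires the
# transformers library and network access to download the checkpoint):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
print(tokenizer("Hello world")["input_ids"])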
| 18
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_tests_dir('''fixtures''')
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[Any] ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE__ = mock.Mock()
SCREAMING_SNAKE_CASE__ = 500
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = HTTPError
SCREAMING_SNAKE_CASE__ = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=__UpperCamelCase ) as mock_head:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This checks that we did call the fake head request
mock_head.assert_called()
def lowercase_ ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowercase_ ( cls : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def lowercase_ ( cls : Union[str, Any] ) -> int:
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def lowercase_ ( self : str ) -> int:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(__UpperCamelCase )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCamelCase , repo_id='''test-feature-extractor''' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def lowercase_ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(__UpperCamelCase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCamelCase , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def lowercase_ ( self : str ) -> List[Any]:
CustomFeatureExtractor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ = CustomFeatureExtractor.from_pretrained(__UpperCamelCase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 493
|
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowercase ( a__ : str ) -> List[Any]:
return EnvironmentCommand()
class UpperCAmelCase_ ( _lowercase):
@staticmethod
def _UpperCamelCase ( __UpperCamelCase : ArgumentParser ) -> List[Any]:
_UpperCamelCase = parser.add_parser('''env''' )
download_parser.set_defaults(func=__UpperCamelCase )
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
_UpperCamelCase = huggingface_hub.__version__
_UpperCamelCase = '''not installed'''
_UpperCamelCase = '''NA'''
if is_torch_available():
import torch
_UpperCamelCase = torch.__version__
_UpperCamelCase = torch.cuda.is_available()
_UpperCamelCase = '''not installed'''
if is_transformers_available():
import transformers
_UpperCamelCase = transformers.__version__
_UpperCamelCase = '''not installed'''
if is_accelerate_available():
import accelerate
_UpperCamelCase = accelerate.__version__
_UpperCamelCase = '''not installed'''
if is_xformers_available():
import xformers
_UpperCamelCase = xformers.__version__
_UpperCamelCase = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(__UpperCamelCase ) )
return info
@staticmethod
def _UpperCamelCase ( __UpperCamelCase : List[str] ) -> Dict:
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 420
| 0
|
'''simple docstring'''
import operator as op
SCREAMING_SNAKE_CASE__ : str = '''scaler.pt'''
SCREAMING_SNAKE_CASE__ : int = '''pytorch_model'''
SCREAMING_SNAKE_CASE__ : Tuple = '''random_states'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''optimizer'''
SCREAMING_SNAKE_CASE__ : str = '''scheduler'''
SCREAMING_SNAKE_CASE__ : int = '''pytorch_model.bin'''
SCREAMING_SNAKE_CASE__ : Dict = '''pytorch_model.bin.index.json'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''model.safetensors'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''model.safetensors.index.json'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''1.10.2'''
SCREAMING_SNAKE_CASE__ : str = '''py38'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''4.17.0'''
SCREAMING_SNAKE_CASE__ : Tuple = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
SCREAMING_SNAKE_CASE__ : str = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
SCREAMING_SNAKE_CASE__ : List[str] = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''2.0.1'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
SCREAMING_SNAKE_CASE__ : int = ['''default''', '''reduce-overhead''', '''max-autotune''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
SCREAMING_SNAKE_CASE__ : List[Any] = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
SCREAMING_SNAKE_CASE__ : Any = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
SCREAMING_SNAKE_CASE__ : List[Any] = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 719
|
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
SCREAMING_SNAKE_CASE__ : Any = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
SCREAMING_SNAKE_CASE__ : List[str] = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def a ( UpperCamelCase_ : Dict ) -> Union[str, Any]:
def remove_articles(UpperCamelCase_ : List[str] ):
snake_case__ =re.compile(r'\b(a|an|the)\b' , re.UNICODE )
return re.sub(UpperCamelCase_ , ' ' , UpperCamelCase_ )
def white_space_fix(UpperCamelCase_ : List[str] ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase_ : Tuple ):
snake_case__ =set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase_ : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase_ ) ) ) )
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple:
return int(normalize_answer(UpperCamelCase_ ) == normalize_answer(UpperCamelCase_ ) )
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
snake_case__ =[any(compute_exact(UpperCamelCase_ , UpperCamelCase_ ) for ref in refs ) for pred, refs in zip(UpperCamelCase_ , UpperCamelCase_ )]
return (sum(UpperCamelCase_ ) / len(UpperCamelCase_ )) * 100
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any ) -> Union[str, Any]:
snake_case__ =[rgram for rgrams in rgramslist for rgram in rgrams]
snake_case__ =Counter(UpperCamelCase_ )
snake_case__ =Counter(UpperCamelCase_ )
snake_case__ =Counter()
for sgram, scount in sgramcounter.items():
snake_case__ =scount * numref
snake_case__ =Counter(UpperCamelCase_ )
snake_case__ =Counter()
for cgram, ccount in cgramcounter.items():
snake_case__ =ccount * numref
# KEEP
snake_case__ =sgramcounter_rep & cgramcounter_rep
snake_case__ =keepgramcounter_rep & rgramcounter
snake_case__ =sgramcounter_rep & rgramcounter
snake_case__ =0
snake_case__ =0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case__ =1
snake_case__ =1
if len(UpperCamelCase_ ) > 0:
snake_case__ =keeptmpscorea / len(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
snake_case__ =keeptmpscorea / sum(keepgramcounterall_rep.values() )
snake_case__ =0
if keepscore_precision > 0 or keepscore_recall > 0:
snake_case__ =2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
snake_case__ =sgramcounter_rep - cgramcounter_rep
snake_case__ =delgramcounter_rep - rgramcounter
snake_case__ =sgramcounter_rep - rgramcounter
snake_case__ =0
snake_case__ =0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case__ =1
if len(UpperCamelCase_ ) > 0:
snake_case__ =deltmpscorea / len(UpperCamelCase_ )
# ADDITION
snake_case__ =set(UpperCamelCase_ ) - set(UpperCamelCase_ )
snake_case__ =set(UpperCamelCase_ ) & set(UpperCamelCase_ )
snake_case__ =set(UpperCamelCase_ ) - set(UpperCamelCase_ )
snake_case__ =0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case__ =1
snake_case__ =1
if len(UpperCamelCase_ ) > 0:
snake_case__ =addtmpscore / len(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
snake_case__ =addtmpscore / len(UpperCamelCase_ )
snake_case__ =0
if addscore_precision > 0 or addscore_recall > 0:
snake_case__ =2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def a ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str ) -> Optional[int]:
snake_case__ =len(UpperCamelCase_ )
snake_case__ =ssent.split(' ' )
snake_case__ =csent.split(' ' )
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
for rsent in rsents:
snake_case__ =rsent.split(' ' )
snake_case__ =[]
snake_case__ =[]
snake_case__ =[]
ragramslist.append(UpperCamelCase_ )
for i in range(0 , len(UpperCamelCase_ ) - 1 ):
if i < len(UpperCamelCase_ ) - 1:
snake_case__ =ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 2:
snake_case__ =ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 3:
snake_case__ =ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(UpperCamelCase_ )
ragramslist.append(UpperCamelCase_ )
ragramslist.append(UpperCamelCase_ )
ragramslist.append(UpperCamelCase_ )
for i in range(0 , len(UpperCamelCase_ ) - 1 ):
if i < len(UpperCamelCase_ ) - 1:
snake_case__ =sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 2:
snake_case__ =sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 3:
snake_case__ =sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(UpperCamelCase_ )
for i in range(0 , len(UpperCamelCase_ ) - 1 ):
if i < len(UpperCamelCase_ ) - 1:
snake_case__ =cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 2:
snake_case__ =cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(UpperCamelCase_ )
if i < len(UpperCamelCase_ ) - 3:
snake_case__ =cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(UpperCamelCase_ )
((snake_case__) , (snake_case__) , (snake_case__)) =SARIngram(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((snake_case__) , (snake_case__) , (snake_case__)) =SARIngram(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((snake_case__) , (snake_case__) , (snake_case__)) =SARIngram(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((snake_case__) , (snake_case__) , (snake_case__)) =SARIngram(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
snake_case__ =sum([keepascore, keepascore, keepascore, keepascore] ) / 4
snake_case__ =sum([delascore, delascore, delascore, delascore] ) / 4
snake_case__ =sum([addascore, addascore, addascore, addascore] ) / 4
snake_case__ =(avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def a ( UpperCamelCase_ : Any , UpperCamelCase_ : bool = True , UpperCamelCase_ : str = "13a" , UpperCamelCase_ : bool = True ) -> Dict:
# Normalization is required for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though the Wiki-Auto and TURK datasets
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
snake_case__ =sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
snake_case__ =sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase_ )()(UpperCamelCase_ )
else:
snake_case__ =sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase_ )
elif tokenizer == "moses":
snake_case__ =sacremoses.MosesTokenizer().tokenize(UpperCamelCase_ , return_str=UpperCamelCase_ , escape=UpperCamelCase_ )
elif tokenizer == "penn":
snake_case__ =sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase_ , return_str=UpperCamelCase_ )
else:
snake_case__ =sentence
if not return_str:
snake_case__ =normalized_sent.split()
return normalized_sent
def a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any ) -> List[str]:
if not (len(UpperCamelCase_ ) == len(UpperCamelCase_ ) == len(UpperCamelCase_ )):
raise ValueError('Sources length must match predictions and references lengths.' )
snake_case__ =0
for src, pred, refs in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
sari_score += SARIsent(normalize(UpperCamelCase_ ) , normalize(UpperCamelCase_ ) , [normalize(UpperCamelCase_ ) for sent in refs] )
snake_case__ =sari_score / len(UpperCamelCase_ )
return 100 * sari_score
def a ( UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]="exp" , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=False , ) -> Tuple:
snake_case__ =len(references[0] )
if any(len(UpperCamelCase_ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
snake_case__ =[[refs[i] for refs in references] for i in range(UpperCamelCase_ )]
snake_case__ =sacrebleu.corpus_bleu(
UpperCamelCase_ , UpperCamelCase_ , smooth_method=UpperCamelCase_ , smooth_value=UpperCamelCase_ , force=UpperCamelCase_ , lowercase=UpperCamelCase_ , use_effective_order=UpperCamelCase_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__( datasets.Metric ):
def _lowercase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
snake_case__ ={}
result.update({'sari': compute_sari(sources=_UpperCAmelCase , predictions=_UpperCAmelCase , references=_UpperCAmelCase )} )
result.update({'sacrebleu': compute_sacrebleu(predictions=_UpperCAmelCase , references=_UpperCAmelCase )} )
result.update({'exact': compute_em(predictions=_UpperCAmelCase , references=_UpperCAmelCase )} )
return result
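# --- Worked normalization example (not part of the original file) ---
# The exact-match score above first normalizes both strings: lowercase, strip
# punctuation, drop the articles "a/an/the", and collapse whitespace. A standalone
# re-implementation of that normalization for a single pair of strings:
import re
import string

def normalize_for_exact_match(text):
    text = re.sub(r"\b(a|an|the)\b", " ", text.lower())
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    return " ".join(text.split())

print(normalize_for_exact_match("The cat sat on a mat."))   # "cat sat on mat"
print(normalize_for_exact_match("the cat  sat on  a mat"))  # "cat sat on mat" -> exact match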
| 581
| 0
|
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase__ :
def __init__( self : List[str] ) -> List[str]:
A = {}
def __UpperCamelCase ( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Any=1 ) -> Dict:
if self.graph.get(__UpperCamelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(__UpperCamelCase ):
A = []
def __UpperCamelCase ( self : int ) -> Any:
return list(self.graph )
def __UpperCamelCase ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] ) -> Dict:
if self.graph.get(__UpperCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCamelCase )
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Dict=-2 , __UpperCamelCase : str=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
A = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCamelCase ) != 0:
A = stack[len(__UpperCamelCase ) - 1]
else:
A = ss
# check if we have reached the starting point
if len(__UpperCamelCase ) == 0:
return visited
def __UpperCamelCase ( self : List[Any] , __UpperCamelCase : List[Any]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 10_000 ) + 10
for i in range(__UpperCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCamelCase , __UpperCamelCase , 1 )
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Dict=-2 ) -> Any:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : int ) -> str:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[Any] ) -> Optional[Any]:
return len(self.graph[u] )
def __UpperCamelCase ( self : Any , __UpperCamelCase : Optional[int]=-2 ) -> List[str]:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
A = s
A = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCamelCase ) != 0:
A = stack[len(__UpperCamelCase ) - 1]
else:
A = ss
# check if we have reached the starting point
if len(__UpperCamelCase ) == 0:
return sorted_nodes
def __UpperCamelCase ( self : List[Any] ) -> int:
A = []
A = []
A = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(__UpperCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(__UpperCamelCase ) != 0:
A = stack[len(__UpperCamelCase ) - 1]
else:
A = False
indirect_parents.append(__UpperCamelCase )
A = s
A = ss
# check if se have reached the starting point
if len(__UpperCamelCase ) == 0:
return list(__UpperCamelCase )
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
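
# Added illustration (not part of the original module): a minimal, hedged usage sketch
# for the directed graph class defined above. Because the class name and its `add_pair`
# method are defined earlier in the file (outside this excerpt), the demo takes an
# already-created instance and fills the adjacency dict directly; edges are stored as
# [weight, target] pairs.
def _demo_directed_graph(g):
    # 0 -> 1 -> 2 -> 0 forms a cycle; every edge has weight 1
    g.graph = {0: [[1, 1]], 1: [[1, 2]], 2: [[1, 0]]}
    print("dfs:", g.dfs())                    # expected [0, 1, 2]
    print("bfs:", g.bfs())                    # expected [0, 1, 2]
    print("in_degree(1):", g.in_degree(1))    # expected 1
    print("out_degree(0):", g.out_degree(0))  # expected 1
    print("has_cycle:", g.has_cycle())        # expected True for this graph
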
class Graph:
    # undirected, weighted graph; every edge is stored in both adjacency lists
    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # add the edge only if it is not already there
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # add the edge only if it is not already there
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles the case where the edge does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
    # if no destination is given, the default value -1 means "visit everything reachable"
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the number of nodes to create; -1 (the default) picks a random count from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
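
# Added illustration (not part of the original module): a small, hedged usage sketch
# for the undirected Graph class above. Edge weights default to 1.
def _demo_undirected_graph():
    g = Graph()
    g.add_pair(1, 2)
    g.add_pair(2, 3)
    g.add_pair(1, 4)
    print("nodes:", g.all_nodes())      # expected [1, 2, 3, 4]
    print("dfs:", g.dfs())              # depth-first order starting from node 1
    print("bfs:", g.bfs())              # breadth-first order starting from node 1
    print("degree(1):", g.degree(1))    # expected 2
    print("has_cycle:", g.has_cycle())  # expected False for this tree-shaped graph
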
| 106
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__UpperCamelCase )
A = -1
A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
A = model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase )
A = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A = TextStreamer(__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A = cs.out[:-1]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : int ) -> List[Any]:
A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__UpperCamelCase )
A = -1
A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
A = model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase )
A = tokenizer.decode(greedy_ids[0] )
A = TextIteratorStreamer(__UpperCamelCase )
A = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
A = Thread(target=model.generate , kwargs=__UpperCamelCase )
thread.start()
A = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : int ) -> Tuple:
A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__UpperCamelCase )
A = -1
A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
A = model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase )
A = greedy_ids[:, input_ids.shape[1] :]
A = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A = TextStreamer(__UpperCamelCase , skip_prompt=__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A = cs.out[:-1]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
A = AutoTokenizer.from_pretrained('distilgpt2' )
A = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(__UpperCamelCase )
A = -1
A = torch.ones((1, 5) , device=__UpperCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A = TextStreamer(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=1 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A = cs.out[:-1] # Remove the final "\n"
A = tokenizer(__UpperCamelCase , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __UpperCamelCase ( self : Any ) -> Dict:
A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(__UpperCamelCase )
A = -1
A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
A = TextIteratorStreamer(__UpperCamelCase , timeout=0.0_0_1 )
A = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
A = Thread(target=model.generate , kwargs=__UpperCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__UpperCamelCase ):
A = ''
for new_text in streamer:
streamer_text += new_text
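
# Added illustration (not part of the test suite): how TextIteratorStreamer is typically
# consumed outside of tests, reusing the imports at the top of this module. The tiny
# checkpoint name is only an example.
def _example_iterator_streamer_usage():
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("An example prompt", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    generation_kwargs = {**inputs, "max_new_tokens": 20, "do_sample": False, "streamer": streamer}
    # generation runs in a background thread so the main thread can consume text as it arrives
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    pieces = []
    for new_text in streamer:  # yields decoded text chunks as soon as they are generated
        pieces.append(new_text)
    thread.join()
    return "".join(pieces)
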
| 106
| 1
|
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name: str) -> str:
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name: str) -> str:
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None) -> str:
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None) -> list:
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
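
# Added illustration (not part of the original module): a few hedged examples of the
# helpers above, using "SquadV2"/"train" purely as sample names (and assuming the
# function names reconstructed above).
def _demo_naming():
    assert camelcase_to_snakecase("SquadV2") == "squad_v2"
    assert snakecase_to_camelcase("squad_v2") == "SquadV2"
    assert filename_prefix_for_split("SquadV2", "train") == "squad_v2-train"
    # two shards of an arrow file; the exact path separator depends on os.path.join
    print(filenames_for_dataset_split("data", "SquadV2", "train", "arrow", shard_lengths=[100, 100]))
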
| 533
|
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
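
# Added check (illustrative only): the first five terms of the series.
def _demo_harmonic_series():
    assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
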
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 533
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , _a , _a)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}")
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Initialize our dataset.
SCREAMING_SNAKE_CASE : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE : Dict = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _a) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE : str = ds["train"].train_test_split(data_args.train_val_split)
SCREAMING_SNAKE_CASE : Tuple = split["train"]
SCREAMING_SNAKE_CASE : Dict = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : str = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.config_name , **_a)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_a)
else:
SCREAMING_SNAKE_CASE : List[Any] = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
})
# create image processor
if model_args.image_processor_name:
SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_a)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_a)
else:
SCREAMING_SNAKE_CASE : Any = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch")
SCREAMING_SNAKE_CASE : Optional[int] = ViTMAEForPreTraining(_a)
if training_args.do_train:
SCREAMING_SNAKE_CASE : Optional[Any] = ds["train"].column_names
else:
SCREAMING_SNAKE_CASE : List[Any] = ds["validation"].column_names
if data_args.image_column_name is not None:
SCREAMING_SNAKE_CASE : Optional[int] = data_args.image_column_name
elif "image" in column_names:
SCREAMING_SNAKE_CASE : Tuple = "image"
elif "img" in column_names:
SCREAMING_SNAKE_CASE : str = "img"
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE : List[Any] = image_processor.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (image_processor.size["height"], image_processor.size["width"])
SCREAMING_SNAKE_CASE : Any = Compose(
[
Lambda(lambda _a: img.convert("RGB") if img.mode != "RGB" else img),
RandomResizedCrop(_a , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std),
])
def preprocess_images(_a):
SCREAMING_SNAKE_CASE : Any = [transforms(_a) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset")
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : int = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
# Set the training transforms
ds["train"].set_transform(_a)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset")
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : List[str] = (
ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
ds["validation"].set_transform(_a)
# Compute absolute learning rate
SCREAMING_SNAKE_CASE : Optional[int] = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
SCREAMING_SNAKE_CASE : List[Any] = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=_a , args=_a , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=_a , data_collator=_a , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : int = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : Any = last_checkpoint
SCREAMING_SNAKE_CASE : Optional[int] = trainer.train(resume_from_checkpoint=_a)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics)
trainer.save_metrics("train" , train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE : Dict = trainer.evaluate()
trainer.log_metrics("eval" , _a)
trainer.save_metrics("eval" , _a)
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE : List[str] = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_a)
else:
trainer.create_model_card(**_a)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
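
# Added illustration (not part of the original script): one hypothetical way to launch
# this MAE pre-training script from the command line; every path and hyper-parameter
# below is only an example.
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --remove_unused_columns False \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss True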
| 25
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
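
# Added check (illustrative only): 100 = 2 * 2 * 5 * 5 and 13 is prime.
def _demo_prime_factors():
    assert prime_factors(100) == [2, 2, 5, 5]
    assert prime_factors(13) == [13]
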
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25
| 1
|
"""simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Return the index of `key` in `list_data`, or -1 if it is not present."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
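
# Added illustration (not part of the original module): the search shrinks the window
# from both ends, so it works on unsorted data too.
def _demo_search():
    assert search([1, 2, 3, 4, 5], 4) == 3
    assert search([5, 1, 4, 2, 3], 3) == 4
    assert search([1, 2, 3], 7) == -1
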
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 74
| 1
|
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return book data from Open Library for a given olid (e.g. 'isbn/0140328726')."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
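
# Added illustration (not part of the original module): exercises summarize_book without
# any network access by passing a minimal fake Open Library record; all field values
# below are made up.
def _demo_summarize_book_offline():
    fake_record = {
        "title": "Fantastic Mr Fox",
        "publish_date": "October 1, 1988",
        "authors": [],  # empty, so no follow-up author lookups are made
        "number_of_pages": 96,
        "first_sentence": {"value": "And these two very old people..."},
        "isbn_10": ["0140328726"],
        "isbn_13": ["9780140328721"],
    }
    summary = summarize_book(fake_record)
    assert summary["Title"] == "Fantastic Mr Fox"
    assert summary["ISBN (10)"] == "0140328726"  # list values are joined with ", "
    return summary
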
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__UpperCAmelCase = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
__UpperCAmelCase = summarize_book(get_openlibrary_data(F"isbn/{isbn}"))
print('\n'.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 65
|
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 251
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 620
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCamelCase__ : Optional[int] = logging.getLogger(__name__)
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : torch.nn.Module , _SCREAMING_SNAKE_CASE : BnbQuantizationConfig , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , _SCREAMING_SNAKE_CASE : Optional[List[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
SCREAMING_SNAKE_CASE_ = []
# custom device map
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE )
# compatibility with peft
SCREAMING_SNAKE_CASE_ = load_in_abit
SCREAMING_SNAKE_CASE_ = load_in_abit
SCREAMING_SNAKE_CASE_ = get_parameter_device(_SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
# convert param to the right dtype
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' )
SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ):
param.to(_SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE_ = replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = get_quantized_model_device_map(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
SCREAMING_SNAKE_CASE_ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = special_dtypes
SCREAMING_SNAKE_CASE_ = no_split_module_classes
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE_ = get_balanced_memory(
_SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ = max_memory
SCREAMING_SNAKE_CASE_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE_ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE_ = []
current_key_name.append(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE_ = '.'.join(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE_ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE_ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE_ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
SCREAMING_SNAKE_CASE_ = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE_ = module.bias.data
bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
with init_empty_weights():
SCREAMING_SNAKE_CASE_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
SCREAMING_SNAKE_CASE_ = find_tied_parameters(_SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE_ = sum(_SCREAMING_SNAKE_CASE , [] )
SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE_ = False
if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ):
SCREAMING_SNAKE_CASE_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE_ = list(model.named_children() )
SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE_ = ['.weight', '.bias']
SCREAMING_SNAKE_CASE_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE_ = name.replace(_SCREAMING_SNAKE_CASE , '' )
filtered_module_names.append(_SCREAMING_SNAKE_CASE )
return filtered_module_names
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
for m in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = param_name
SCREAMING_SNAKE_CASE_ = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE_ = tensor_name.split('.' )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
SCREAMING_SNAKE_CASE_ = new_module
SCREAMING_SNAKE_CASE_ = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE_ = False
offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , )
else:
offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'meta' , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
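
# Added illustration (not part of the original module): a hedged sketch of how these
# utilities are typically reached through accelerate's public API. The tiny model below
# is a stand-in, and `BnbQuantizationConfig` / `load_and_quantize_model` are the public
# names under which accelerate exposes the helpers defined in this file.
def _example_8bit_quantization():
    import torch.nn as nn
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

    tiny_model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 2))
    quant_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    # Requires a CUDA device and a recent `bitsandbytes` installation.
    return load_and_quantize_model(tiny_model, bnb_quantization_config=quant_config)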
| 620
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = StableDiffusionDiffEditPipeline
lowerCamelCase :Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
lowerCamelCase :int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
lowerCamelCase :Optional[int] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCamelCase :List[str] = frozenset([] )
def UpperCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase_ , )
_A = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
_A = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase_ , set_alpha_to_zero=lowerCAmelCase_ , )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
_A = CLIPTextModel(lowerCAmelCase_ )
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any:
_A = floats_tensor((1, 16, 16) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_A = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_A = torch.manual_seed(lowerCAmelCase_ )
else:
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> str:
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" )
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_A = torch.manual_seed(lowerCAmelCase_ )
else:
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> List[Any]:
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" )
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_A = torch.manual_seed(lowerCAmelCase_ )
else:
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> int:
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
_A = self.get_dummy_components()
_A = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_A = self.get_dummy_inputs(lowerCAmelCase_ )
_A = pipe(**lowerCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase_ )
_A = self.pipeline_class.from_pretrained(lowerCAmelCase_ )
pipe_loaded.to(lowerCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase_ , lowerCAmelCase_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
_A = self.get_dummy_inputs(lowerCAmelCase_ )
_A = pipe_loaded(**lowerCAmelCase_ )[0]
_A = np.abs(output - output_loaded ).max()
self.assertLess(lowerCAmelCase_ , 1E-4 )
def UpperCAmelCase ( self ) -> List[str]:
_A = """cpu"""
_A = self.get_dummy_components()
_A = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = self.get_dummy_mask_inputs(lowerCAmelCase_ )
_A = pipe.generate_mask(**lowerCAmelCase_ )
_A = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
_A = np.array([0] * 9 )
_A = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = """cpu"""
_A = self.get_dummy_components()
_A = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = self.get_dummy_inversion_inputs(lowerCAmelCase_ )
_A = pipe.invert(**lowerCAmelCase_ ).images
_A = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
        _A = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
_A = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 )
def UpperCAmelCase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCAmelCase ( self ) -> Tuple:
_A = """cpu"""
_A = self.get_dummy_components()
_A = {"""beta_start""": 0.0_0085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
_A = DPMSolverMultistepScheduler(**lowerCAmelCase_ )
_A = DPMSolverMultistepInverseScheduler(**lowerCAmelCase_ )
_A = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = self.get_dummy_inversion_inputs(lowerCAmelCase_ )
_A = pipe.invert(**lowerCAmelCase_ ).images
_A = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
        _A = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
_A = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 )
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase ( cls ) -> str:
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
        _A = raw_image.convert("""RGB""" ).resize((768, 768) )
_A = raw_image
def UpperCAmelCase ( self ) -> Any:
_A = torch.manual_seed(0 )
_A = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
_A = DDIMScheduler.from_config(pipe.scheduler.config )
_A = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """a bowl of fruit"""
_A = """a bowl of pears"""
_A = pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase_ , target_prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , )
_A = pipe.invert(
prompt=lowerCAmelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase_ ).latents
_A = pipe(
prompt=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , image_latents=lowerCAmelCase_ , generator=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
_A = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCAmelCase ( self ) -> int:
_A = torch.manual_seed(0 )
_A = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
_A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_A = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """a bowl of fruit"""
_A = """a bowl of pears"""
_A = pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase_ , target_prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , )
_A = pipe.invert(
prompt=lowerCAmelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase_ , num_inference_steps=25 , ).latents
_A = pipe(
prompt=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , image_latents=lowerCAmelCase_ , generator=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
_A = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
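# A condensed sketch of the three-step DiffEdit flow the slow tests above exercise (illustrative
# only; the prompts and strengths are taken from the test bodies, everything else is an assumption):
#     mask_image = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit",
#                                     target_prompt="a bowl of pears", generator=generator)
#     inv_latents = pipe.invert(prompt="a bowl of fruit", image=raw_image,
#                               inpaint_strength=0.7, generator=generator).latents
#     edited = pipe(prompt="a bowl of pears", mask_image=mask_image, image_latents=inv_latents,
#                   generator=generator, inpaint_strength=0.7, output_type="numpy").images[0]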
| 401
|
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    # count how often `term` occurs in `document`, ignoring case and punctuation
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    # return (number of documents containing `term`, total number of documents);
    # documents in `corpus` are separated by newlines
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
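# A minimal usage sketch (added for illustration, not part of the original module): score the
# term "sat" against a tiny two-document corpus, one document per line.
if __name__ == "__main__":
    example_corpus = "the cat sat on the mat\nthe dog sat on the log"
    tf = term_frequency("sat", "the cat sat on the mat")
    df, n = document_frequency("sat", example_corpus)
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # 0.0, since "sat" appears in every document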
| 401
| 1
|
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _a :
"""simple docstring"""
def __init__( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE=0.2 ,__SCREAMING_SNAKE_CASE=0.2 ):
SCREAMING_SNAKE_CASE : Optional[int] = bp_numa
SCREAMING_SNAKE_CASE : List[Any] = bp_numa
SCREAMING_SNAKE_CASE : List[Any] = bp_numa
SCREAMING_SNAKE_CASE : List[Any] = conva_get[:2]
SCREAMING_SNAKE_CASE : Any = conva_get[2]
SCREAMING_SNAKE_CASE : Tuple = size_pa
SCREAMING_SNAKE_CASE : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE : Optional[int] = rate_t
SCREAMING_SNAKE_CASE : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE : Any = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE : Any = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE : Tuple = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE : str = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
def __a ( self ,__SCREAMING_SNAKE_CASE ):
# save model dict with pickle
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(UpperCAmelCase_ ,'wb' ) as f:
pickle.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
print(f"""Model saved: {save_path}""" )
@classmethod
def __a ( cls ,__SCREAMING_SNAKE_CASE ):
# read saved model
with open(UpperCAmelCase_ ,'rb' ) as f:
SCREAMING_SNAKE_CASE : str = pickle.load(UpperCAmelCase_ ) # noqa: S301
SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
SCREAMING_SNAKE_CASE : str = model_dic.get('size_pooling1' )
SCREAMING_SNAKE_CASE : str = model_dic.get('num_bp1' )
SCREAMING_SNAKE_CASE : List[Any] = model_dic.get('num_bp2' )
SCREAMING_SNAKE_CASE : List[Any] = model_dic.get('num_bp3' )
SCREAMING_SNAKE_CASE : List[Any] = model_dic.get('rate_weight' )
SCREAMING_SNAKE_CASE : int = model_dic.get('rate_thre' )
# create model instance
SCREAMING_SNAKE_CASE : List[Any] = CNN(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
# modify model parameter
SCREAMING_SNAKE_CASE : Any = model_dic.get('w_conv1' )
SCREAMING_SNAKE_CASE : List[Any] = model_dic.get('wkj' )
SCREAMING_SNAKE_CASE : Tuple = model_dic.get('vji' )
SCREAMING_SNAKE_CASE : Tuple = model_dic.get('thre_conv1' )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_dic.get('thre_bp2' )
SCREAMING_SNAKE_CASE : str = model_dic.get('thre_bp3' )
return conv_ins
def __a ( self ,__SCREAMING_SNAKE_CASE ):
return 1 / (1 + np.exp(-1 * x ))
def __a ( self ,__SCREAMING_SNAKE_CASE ):
return round(UpperCAmelCase_ ,3 )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
# convolution process
SCREAMING_SNAKE_CASE : str = convs[0]
SCREAMING_SNAKE_CASE : Optional[int] = convs[1]
SCREAMING_SNAKE_CASE : int = np.shape(UpperCAmelCase_ )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE : List[Any] = []
for i_focus in range(0 ,size_data - size_conv + 1 ,UpperCAmelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : str = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCAmelCase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i_focus in range(len(UpperCAmelCase_ ) ):
SCREAMING_SNAKE_CASE : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : int = np.asmatrix(UpperCAmelCase_ ).reshape(
UpperCAmelCase_ ,UpperCAmelCase_ )
data_featuremap.append(UpperCAmelCase_ )
        # expanding the data slice to one dimension
SCREAMING_SNAKE_CASE : Tuple = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : str = np.asarray(UpperCAmelCase_ )
return focus_list, data_featuremap
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE : str = len(featuremaps[0] )
SCREAMING_SNAKE_CASE : List[str] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE : Dict = []
for i_map in range(len(UpperCAmelCase_ ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = featuremaps[i_map]
SCREAMING_SNAKE_CASE : Dict = []
for i_focus in range(0 ,UpperCAmelCase_ ,UpperCAmelCase_ ):
for j_focus in range(0 ,UpperCAmelCase_ ,UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCAmelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = np.asmatrix(UpperCAmelCase_ ).reshape(UpperCAmelCase_ ,UpperCAmelCase_ )
featuremap_pooled.append(UpperCAmelCase_ )
return featuremap_pooled
def __a ( self ,__SCREAMING_SNAKE_CASE ):
# expanding three dimension data to one dimension list
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(len(UpperCAmelCase_ ) ):
SCREAMING_SNAKE_CASE : Dict = np.shape(data[i] )
SCREAMING_SNAKE_CASE : Dict = data[i].reshape(1 ,shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE : str = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = np.asarray(UpperCAmelCase_ )
return data_expanded
def __a ( self ,__SCREAMING_SNAKE_CASE ):
# expanding matrix to one dimension list
SCREAMING_SNAKE_CASE : int = np.asarray(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.shape(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : int = 0
for i_map in range(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[str] = np.ones((size_map, size_map) )
for i in range(0 ,UpperCAmelCase_ ,UpperCAmelCase_ ):
for j in range(0 ,UpperCAmelCase_ ,UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE : Optional[Any] = i_pool + 1
SCREAMING_SNAKE_CASE : Dict = np.multiply(
UpperCAmelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(UpperCAmelCase_ )
return pd_all
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE=bool ):
        # model training
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(UpperCAmelCase_ )) )
print((' - - Shape: Teach_Data ', np.shape(UpperCAmelCase_ )) )
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : List[Any] = 10000
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE : int = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(UpperCAmelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE : Optional[Any] = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.convolute(
UpperCAmelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
SCREAMING_SNAKE_CASE : str = self.pooling(UpperCAmelCase_ ,self.size_poolinga )
SCREAMING_SNAKE_CASE : Tuple = np.shape(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self._expand(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = data_bp_input
SCREAMING_SNAKE_CASE : List[Any] = np.dot(UpperCAmelCase_ ,self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE : Tuple = self.sig(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = np.dot(UpperCAmelCase_ ,self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE : int = self.sig(UpperCAmelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE : int = np.multiply(
(data_teach - bp_outa) ,np.multiply(UpperCAmelCase_ ,(1 - bp_outa) ) )
SCREAMING_SNAKE_CASE : int = np.multiply(
np.dot(UpperCAmelCase_ ,self.wkj ) ,np.multiply(UpperCAmelCase_ ,(1 - bp_outa) ) )
SCREAMING_SNAKE_CASE : Optional[Any] = np.dot(UpperCAmelCase_ ,self.vji )
SCREAMING_SNAKE_CASE : Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE : Dict = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE : Optional[Any] = self._calculate_gradient_from_pool(
UpperCAmelCase_ ,UpperCAmelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE : Dict = self.rate_weight * np.dot(UpperCAmelCase_ ,UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE : Union[str, Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
SCREAMING_SNAKE_CASE : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE : Tuple = rp + 1
SCREAMING_SNAKE_CASE : Any = error_count / patterns
all_mse.append(UpperCAmelCase_ )
def draw_error():
SCREAMING_SNAKE_CASE : Optional[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(UpperCAmelCase_ ,'+-' )
plt.plot(UpperCAmelCase_ ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(UpperCAmelCase_ ,alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def __a ( self ,__SCREAMING_SNAKE_CASE ):
# model predict
SCREAMING_SNAKE_CASE : Any = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(UpperCAmelCase_ )) )
for p in range(len(UpperCAmelCase_ ) ):
SCREAMING_SNAKE_CASE : List[str] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.convolute(
UpperCAmelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
SCREAMING_SNAKE_CASE : Dict = self.pooling(UpperCAmelCase_ ,self.size_poolinga )
SCREAMING_SNAKE_CASE : str = self._expand(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = data_bp_input
SCREAMING_SNAKE_CASE : List[str] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE : int = self.sig(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE : int = self.sig(UpperCAmelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE : List[str] = [list(map(self.do_round ,UpperCAmelCase_ ) ) for each in produce_out]
return np.asarray(UpperCAmelCase_ )
def __a ( self ,__SCREAMING_SNAKE_CASE ):
        # return the data of the image after the convolution process so we can check it out
SCREAMING_SNAKE_CASE : int = np.asmatrix(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.convolute(
UpperCAmelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
SCREAMING_SNAKE_CASE : List[Any] = self.pooling(UpperCAmelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 711
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A = DDIMPipeline
A = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
A = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
A = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
A = False
def __a ( self ):
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Optional[Any] = UNet2DModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
SCREAMING_SNAKE_CASE : str = DDIMScheduler()
SCREAMING_SNAKE_CASE : Union[str, Any] = {'unet': unet, 'scheduler': scheduler}
return components
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __a ( self ):
SCREAMING_SNAKE_CASE : Optional[Any] = 'cpu'
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Any = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
SCREAMING_SNAKE_CASE : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE ,1e-3 )
def __a ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __a ( self ):
super().test_save_load_local(expected_max_difference=3e-3 )
def __a ( self ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __a ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __a ( self ):
SCREAMING_SNAKE_CASE : Any = 'google/ddpm-cifar10-32'
        SCREAMING_SNAKE_CASE : str = UNet2DModel.from_pretrained(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = DDIMScheduler()
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE ,scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = ddim(generator=__SCREAMING_SNAKE_CASE ,eta=0.0 ,output_type='numpy' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self ):
SCREAMING_SNAKE_CASE : Tuple = 'google/ddpm-ema-bedroom-256'
        SCREAMING_SNAKE_CASE : List[Any] = UNet2DModel.from_pretrained(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE ,scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE ,output_type='numpy' ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
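# A short illustrative sketch (an assumption, not part of the test file) of the usage pattern the
# tests above exercise: build a DDIMPipeline from a UNet and a scheduler, then sample deterministically.
#     from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#     images = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images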
| 220
| 0
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCAmelCase__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    """Rename a key in the state dict, in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Move the backbone keys over to the naming scheme used by the HF implementation."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__A= ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__A= state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
__A= state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__A= in_proj_weight[:256, :]
__A= in_proj_bias[:256]
__A= in_proj_weight[256:512, :]
__A= in_proj_bias[256:512]
__A= in_proj_weight[-256:, :]
__A= in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__A= state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__A= state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__A= in_proj_weight[:256, :]
__A= in_proj_bias[:256]
__A= in_proj_weight[256:512, :]
__A= in_proj_bias[256:512]
__A= in_proj_weight[-256:, :]
__A= in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__A= state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
__A= state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__A= in_proj_weight_cross_attn[:256, :]
__A= in_proj_bias_cross_attn[:256]
__A= in_proj_weight_cross_attn[256:512, :]
__A= in_proj_bias_cross_attn[256:512]
__A= in_proj_weight_cross_attn[-256:, :]
__A= in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize the image so that its longer side matches the resolution the checkpoint expects."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if 'detection' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert the PIL image to a tensor and normalize it with the ImageNet mean and std."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : Dict,_SCREAMING_SNAKE_CASE : int,_SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
logger.info('Converting model...' )
# load original state dict
__A= torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE,map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE,_SCREAMING_SNAKE_CASE,_SCREAMING_SNAKE_CASE )
__A= rename_backbone_keys(_SCREAMING_SNAKE_CASE )
# query, key and value matrices need special treatment
read_in_q_k_v(_SCREAMING_SNAKE_CASE )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__A= 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
__A= state_dict.pop(_SCREAMING_SNAKE_CASE )
__A= val
# create HuggingFace model and load state dict
__A= TableTransformerConfig(
backbone='resnet18',mask_loss_coefficient=1,dice_loss_coefficient=1,ce_loss_coefficient=1,bbox_loss_coefficient=5,giou_loss_coefficient=2,eos_coefficient=0.4,class_cost=1,bbox_cost=5,giou_cost=2,)
if "detection" in checkpoint_url:
__A= 15
__A= 2
__A= {0: 'table', 1: 'table rotated'}
__A= idalabel
__A= {v: k for k, v in idalabel.items()}
else:
__A= 125
__A= 6
__A= {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
__A= idalabel
__A= {v: k for k, v in idalabel.items()}
__A= DetrImageProcessor(
format='coco_detection',max_size=800 if 'detection' in checkpoint_url else 1000 )
__A= TableTransformerForObjectDetection(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# verify our conversion
__A= 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
__A= hf_hub_download(repo_id='nielsr/example-pdf',repo_type='dataset',filename=_SCREAMING_SNAKE_CASE )
__A= Image.open(_SCREAMING_SNAKE_CASE ).convert('RGB' )
__A= normalize(resize(_SCREAMING_SNAKE_CASE,_SCREAMING_SNAKE_CASE ) ).unsqueeze(0 )
__A= model(_SCREAMING_SNAKE_CASE )
if "detection" in checkpoint_url:
__A= (1, 15, 3)
        __A= torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        __A= torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
__A= (1, 125, 7)
        __A= torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        __A= torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3],_SCREAMING_SNAKE_CASE,atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3],_SCREAMING_SNAKE_CASE,atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
__A= (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
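# Example invocation (hypothetical command line added for illustration; the script filename below is
# an assumption and may differ in the actual repository):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection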
| 186
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
UpperCAmelCase__ = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
UpperCAmelCase__ = {
    '''AI-Sweden/gpt-sw3-126m''': 2048,
    '''AI-Sweden/gpt-sw3-350m''': 2048,
    '''AI-Sweden/gpt-sw3-1.6b''': 2048,
    '''AI-Sweden/gpt-sw3-6.7b''': 2048,
    '''AI-Sweden/gpt-sw3-20b''': 2048,
}
class a__ ( a_ ):
'''simple docstring'''
A : int = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Optional[Any] , ) -> None:
__A= {} if sp_model_kwargs is None else sp_model_kwargs
__A= kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored' )
__A= 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__A= '<|endoftext|>' if eos_token is None else eos_token
__A= '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__A= unk_token if pad_token is None else pad_token
__A= eos_token if bos_token is None else bos_token
else:
__A= '<pad>' if pad_token is None else pad_token
__A= '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
__A= do_lower_case
__A= remove_space
__A= keep_accents
__A= vocab_file
__A= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
# Used for whitespace normalization in input texts
        # fmt: off
__A= {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        __A= re.compile(
            F"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self : Optional[int] ) -> Tuple:
__A= self.__dict__.copy()
__A= None
return state
def __setstate__( self : int , lowerCAmelCase_ : int ) -> Tuple:
__A= d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__A= {}
__A= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCAmelCase ( self : Tuple ) -> int:
return len(self.sp_model )
def lowerCAmelCase ( self : int , lowerCAmelCase_ : str ) -> str:
__A= self.non_printing_characters_re.sub('' , lowerCAmelCase_ )
# Normalize whitespaces
__A= ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
__A= unicodedata.normalize('NFC' , lowerCAmelCase_ )
return text
def lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[Any] ) -> List[str]:
__A= self.preprocess_text(lowerCAmelCase_ )
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def lowerCAmelCase ( self : Any , lowerCAmelCase_ : str ) -> int:
return self.sp_model.PieceToId(lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : int ) -> str:
return self.sp_model.IdToPiece(lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ : str ) -> str:
return out_string
def lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> str:
__A= []
__A= ''
__A= False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
__A= True
__A= []
else:
current_sub_tokens.append(lowerCAmelCase_ )
__A= False
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string
def lowerCAmelCase ( self : List[Any] ) -> Dict[str, int]:
__A= {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A= os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , 'wb' ) as fi:
__A= self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
def lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__A= self.preprocess_text(lowerCAmelCase_ )
__A= self.sp_model.encode(lowerCAmelCase_ )
else:
__A= [self.preprocess_text(lowerCAmelCase_ ) for t in text]
__A= self.sp_model.encode(lowerCAmelCase_ )
if return_tensors is True or return_tensors == "pt":
__A= torch.tensor(lowerCAmelCase_ )
return token_ids
def lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Union[int, List[int]] ) -> str:
return self.sp_model.decode(lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : "Conversation" ) -> List[int]:
__A= [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
__A= (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(lowerCAmelCase_ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=lowerCAmelCase_ )
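# A brief usage sketch (an illustrative assumption, not part of the tokenizer module): in practice the
# class is loaded through the standard Auto API and called like any other slow tokenizer.
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     input_ids = tokenizer("Träd är fina", return_tensors="pt").input_ids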
| 186
| 1
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a_ = "src/transformers"
a_ = "docs/source/en/tasks"
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
with open(__lowercase , "r" , encoding="utf-8" , newline="\n" ) as f:
_A = f.readlines()
# Find the start prompt.
_A = 0
while not lines[start_index].startswith(__lowercase ):
start_index += 1
start_index += 1
_A = start_index
while not lines[end_index].startswith(__lowercase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(TRANSFORMERS_PATH)
a_ = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a_ = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def a__ ( __lowercase ) -> Dict:
_A = TASK_GUIDE_TO_MODELS[task_guide]
_A = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__lowercase , set() )
_A = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def a__ ( __lowercase , __lowercase=False ) -> Any:
_A , _A , _A , _A = _find_text_in_file(
filename=os.path.join(__lowercase , __lowercase ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
_A = get_model_list_for_task(__lowercase )
if current_list != new_list:
if overwrite:
with open(os.path.join(__lowercase , __lowercase ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
" to fix this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
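# Typical invocations, mirroring the comment at the top of this script:
#   python utils/check_task_guides.py                      # check that the task guide model lists are up to date
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the model lists in place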
| 719
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "spiece.model"}
a_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
a_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
a_ = "▁"
class snake_case ( _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_A = [F"""<extra_id_{i}>""" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_A = legacy
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_A = vocab_file
_A = extra_ids
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]:
'''simple docstring'''
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_A = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]:
'''simple docstring'''
if not self.legacy:
_A = SPIECE_UNDERLINE + text.replace(a__ , " " )
return super().tokenize(a__ , **a__ )
def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any:
'''simple docstring'''
if not self.legacy:
_A = text.startswith(a__ )
if is_first:
_A = text[1:]
_A = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ):
_A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a_ ( self : int , a__ : List[Any] ) -> List[str]:
'''simple docstring'''
if token.startswith("<extra_id_" ):
_A = re.match(r"<extra_id_(\d+)>" , a__ )
_A = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_A = self.sp_model.IdToPiece(a__ )
else:
_A = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]:
'''simple docstring'''
_A = []
_A = ""
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_A = True
_A = []
else:
current_sub_tokens.append(a__ )
_A = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
| 621
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case__ ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand( _lowerCamelCase : ArgumentParser ):
        raise NotImplementedError()
    @abstractmethod
    def run( self : Any ):
        raise NotImplementedError()
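# A minimal sketch (hypothetical names, added for illustration) of how this abstract command base is
# typically subclassed: `register_subcommand` wires an argparse sub-parser, `run` performs the work.
#     class EnvCommand(snake_case__):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             sub_parser = parser.add_parser("env")
#             sub_parser.set_defaults(func=lambda args: EnvCommand())
#         def run(self):
#             print("collect and print environment info here")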
| 170
|
def lowercase__( A ):
snake_case__ : Optional[Any] = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def lowercase__( A ):
snake_case__ : List[Any] = [chr(i + 6_5 ) for i in range(2_6 )]
# Remove duplicate characters from key
snake_case__ : Optional[Any] = remove_duplicates(key.upper() )
snake_case__ : Dict = len(A )
# First fill cipher with key characters
snake_case__ : Optional[int] = {alphabet[i]: char for i, char in enumerate(A )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(A ) , 2_6 ):
snake_case__ : List[Any] = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
snake_case__ : Dict = alphabet[i - offset]
snake_case__ : Optional[int] = char
return cipher_alphabet
def lowercase__( A , A ):
return "".join(cipher_map.get(A , A ) for ch in message.upper() )
def lowercase__( A , A ):
snake_case__ : Union[str, Any] = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(A , A ) for ch in message.upper() )
def lowercase__( ):
snake_case__ : Union[str, Any] = input('Enter message to encode or decode: ' ).strip()
snake_case__ : str = input('Enter keyword: ' ).strip()
snake_case__ : Optional[int] = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
snake_case__ : Optional[Any] = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
snake_case__ : int = create_cipher_map(A )
print(func(A , A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 170
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class lowerCamelCase ( _UpperCAmelCase ):
__lowerCamelCase = 42
__lowerCamelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class lowerCamelCase ( _UpperCAmelCase ):
__lowerCamelCase = 42
__lowerCamelCase = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 708
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class lowerCamelCase :
__lowerCamelCase = 42
__lowerCamelCase = None
__lowerCamelCase = None
def a_ (_lowerCAmelCase : TreeNode | None )-> bool:
# Validation
def is_valid_tree(_lowerCAmelCase : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_lowerCAmelCase ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
_lowerCAmelCase : TreeNode | None , _lowerCAmelCase : float , _lowerCAmelCase : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _lowerCAmelCase , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _lowerCAmelCase )
)
return is_binary_search_tree_recursive_check(_lowerCAmelCase , -float("""inf""" ) , float("""inf""" ) )
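# --- Added illustration, not part of the original file ---
# A self-contained sketch of the bounds-passing idea used by the recursive check above;
# every name below is local to this sketch and chosen purely for illustration.
@dataclass
class _Node:
    data: float
    left: _Node | None = None
    right: _Node | None = None

def _is_bst(node: _Node | None, lo: float = float("-inf"), hi: float = float("inf")) -> bool:
    # Each node must lie strictly between the bounds inherited from its ancestors.
    if node is None:
        return True
    return lo < node.data < hi and _is_bst(node.left, lo, node.data) and _is_bst(node.right, node.data, hi)

assert _is_bst(_Node(2.0, _Node(1.0), _Node(3.0)))      # 1 < 2 < 3 -> valid
assert not _is_bst(_Node(2.0, _Node(3.0), _Node(1.0)))  # children swapped -> rejected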
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case ( UpperCamelCase_ ):
def __init__( self : str , a_ : Optional[Any] , a_ : Optional[Any]=13 , a_ : List[Any]=7 , a_ : Optional[int]=True , a_ : Any=True , a_ : Dict=True , a_ : Tuple=True , a_ : str=True , a_ : Tuple=False , a_ : Optional[int]=False , a_ : Union[str, Any]=False , a_ : int=2 , a_ : Dict=99 , a_ : Union[str, Any]=0 , a_ : str=32 , a_ : Union[str, Any]=5 , a_ : Any=4 , a_ : int=0.1 , a_ : Any=0.1 , a_ : List[str]=512 , a_ : Optional[int]=12 , a_ : Union[str, Any]=2 , a_ : List[Any]=0.02 , a_ : Tuple=3 , a_ : int=4 , a_ : Optional[int]="last" , a_ : List[str]=None , a_ : List[Any]=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : str = use_input_lengths
SCREAMING_SNAKE_CASE__ : Optional[int] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : List[str] = gelu_activation
SCREAMING_SNAKE_CASE__ : str = sinusoidal_embeddings
SCREAMING_SNAKE_CASE__ : Any = causal
SCREAMING_SNAKE_CASE__ : Optional[Any] = asm
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_langs
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_special
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Dict = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : List[Any] = num_choices
SCREAMING_SNAKE_CASE__ : List[Any] = summary_type
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_proj
SCREAMING_SNAKE_CASE__ : List[str] = scope
def __lowercase( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowercase( self : int )-> Any:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Any , a_ : Union[str, Any] , a_ : Any , a_ : Union[str, Any] , a_ : List[str] , a_ : List[Any] , a_ : int , a_ : Any , )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = FlaubertModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , lengths=a_ , langs=a_ )
SCREAMING_SNAKE_CASE__ : int = model(a_ , langs=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : str , a_ : str , a_ : int , a_ : Union[str, Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Any , a_ : int , a_ : Tuple , a_ : str , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaubertWithLMHeadModel(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Any , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : str , a_ : int , a_ : Union[str, Any] , a_ : List[Any] , a_ : Any , a_ : Any , a_ : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = FlaubertForQuestionAnsweringSimple(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase( self : Tuple , a_ : Tuple , a_ : int , a_ : Optional[Any] , a_ : Optional[int] , a_ : Tuple , a_ : List[Any] , a_ : int , a_ : Optional[int] , a_ : int , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaubertForQuestionAnswering(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = model(
a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = model(
a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , )
((SCREAMING_SNAKE_CASE__) , ) : Optional[Any] = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , start_positions=a_ , end_positions=a_ )
((SCREAMING_SNAKE_CASE__) , ) : int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowercase( self : Dict , a_ : int , a_ : Optional[int] , a_ : Tuple , a_ : Dict , a_ : Any , a_ : List[str] , a_ : str , a_ : List[Any] , a_ : int , )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = FlaubertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : List[str] , a_ : Tuple , a_ : str , a_ : Optional[Any] , a_ : List[Any] , a_ : Tuple , a_ : List[Any] , a_ : List[Any] , a_ : Optional[int] , a_ : List[str] , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = FlaubertForTokenClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self : Optional[int] , a_ : Union[str, Any] , a_ : str , a_ : List[str] , a_ : Any , a_ : Any , a_ : Any , a_ : List[Any] , a_ : Optional[Any] , a_ : Any , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_choices
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaubertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Dict = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowercase( self : Union[str, Any] , a_ : str , a_ : List[Any] , a_ : int , a_ : str , a_ : Union[str, Any] )-> int:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowercase( self : Optional[Any] , a_ : List[Any] , a_ : int , a_ : List[str]=False )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaubertModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self , config_class=a_ , emb_dim=37 )
def __lowercase( self : Dict )-> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a_ )
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a_ )
def __lowercase( self : Union[str, Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a_ )
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a_ )
def __lowercase( self : List[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a_ )
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a_ )
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a_ )
@slow
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Tuple = FlaubertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Any = model_class(config=a_ )
SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = torch.jit.trace(
a_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , 'traced_model.pt' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.jit.load(os.path.join(a_ , 'traced_model.pt' ) , map_location=a_ )
loaded(inputs_dict['input_ids'].to(a_ ) , inputs_dict['attention_mask'].to(a_ ) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )[0]
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a_ )
SCREAMING_SNAKE_CASE__ : str = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
| 85
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = StableDiffusionInstructPixaPixPipeline
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase( self : str )-> int:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=a_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(a_ )
SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE__ : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=0 )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' )
if str(a_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a_ )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : int = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Dict = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : Optional[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'french fries'
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**a_ , negative_prompt=a_ )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [inputs['prompt']] * 2
SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5
SCREAMING_SNAKE_CASE__ : Tuple = image.permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ : int = image.repeat(2 , 1 , 1 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Any = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' )
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Any = [round(a_ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : int = VaeImageProcessor(do_resize=a_ , do_normalize=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = components['vae']
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self : List[Any] , a_ : Dict=0 )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
SCREAMING_SNAKE_CASE__ : Tuple = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase( self : Dict )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ )
SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ )
SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Tuple = pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase( self : int )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = 0
def callback_fn(a_ : int , a_ : int , a_ : torch.FloatTensor ) -> None:
SCREAMING_SNAKE_CASE__ : Tuple = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
SCREAMING_SNAKE_CASE__ : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ : Tuple = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Dict = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs()
pipe(**a_ , callback=a_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowercase( self : int )-> Any:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ )
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE__ : Dict = inputs['image'].resize((504, 504) )
SCREAMING_SNAKE_CASE__ : List[Any] = 'timbrooks/instruct-pix2pix'
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
a_ , safety_checker=a_ , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Any = pipe(**a_ )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
SCREAMING_SNAKE_CASE__ : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 85
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
'''simple docstring'''
__lowerCamelCase : List[str] =['image_processor', 'tokenizer']
__lowerCamelCase : Tuple ='ViTImageProcessor'
__lowerCamelCase : Optional[int] =('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[int] , __lowercase : int=None , __lowercase : Any=None , **__lowercase : Any ):
'''simple docstring'''
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowercase , )
__a = kwargs.pop("""feature_extractor""" )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__lowercase , __lowercase )
def __call__( self : List[Any] , __lowercase : Optional[int]=None , __lowercase : Optional[int]=None , __lowercase : List[str]=None , __lowercase : Tuple=None , **__lowercase : Optional[int] ):
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError("""You have to specify either text, visual prompt or images.""" )
if text is not None and visual_prompt is not None:
raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" )
if text is not None:
__a = self.tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase )
if visual_prompt is not None:
__a = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
if images is not None:
__a = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
if visual_prompt is not None and images is not None:
__a = {
"""pixel_values""": image_features.pixel_values,
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
__a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
__a = {
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__lowercase ) , tensor_type=__lowercase )
def UpperCamelCase_ ( self : List[str] , *__lowercase : int , **__lowercase : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Optional[Any] , *__lowercase : List[str] , **__lowercase : Any ):
'''simple docstring'''
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowercase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowercase , )
return self.image_processor
| 703
|
import itertools
import string
from collections.abc import Generator, Iterable
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Iterable[str] , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = iter(_SCREAMING_SNAKE_CASE )
while True:
__a = tuple(itertools.islice(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if not chunk:
return
yield chunk
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
__a = """"""
if len(_SCREAMING_SNAKE_CASE ) < 2:
return dirty
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(clean ) & 1:
clean += "X"
return clean
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
__a = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(char )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(char )
return table
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = generate_table(_SCREAMING_SNAKE_CASE )
__a = prepare_input(_SCREAMING_SNAKE_CASE )
__a = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_SCREAMING_SNAKE_CASE , 2 ):
__a , __a = divmod(table.index(_SCREAMING_SNAKE_CASE ) , 5 )
__a , __a = divmod(table.index(_SCREAMING_SNAKE_CASE ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = generate_table(_SCREAMING_SNAKE_CASE )
__a = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_SCREAMING_SNAKE_CASE , 2 ):
__a , __a = divmod(table.index(_SCREAMING_SNAKE_CASE ) , 5 )
__a , __a = divmod(table.index(_SCREAMING_SNAKE_CASE ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
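# --- Added illustration, not part of the original file ---
# A hedged sketch of what the two helpers above are meant to produce, assuming they keep
# the names used at their call sites (generate_table, prepare_input): the 5x5 table drops
# "J" in favour of "I", and prepare_input splits doubled letters with "X" and keeps the
# cleaned text at even length so it can be chunked into digraphs.
def _demo_playfair_helpers() -> None:
    table = generate_table("marvin the paranoid android")
    assert len(table) == 25 and "J" not in table
    assert prepare_input("Hello") == "HELXLO"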
| 547
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case : str = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : str = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 34
| 0
|
from __future__ import annotations
from typing import TypedDict
class UpperCAmelCase_ ( a__):
'''simple docstring'''
__UpperCamelCase : str
__UpperCamelCase : int
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''The parameter s type must be str.''' )
return [s[i:] + s[:i] for i in range(len(_lowercase ) )]
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''The parameter s type must be str.''' )
if not s:
raise ValueError('''The parameter s must not be empty.''' )
UpperCamelCase : str = all_rotations(_lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
UpperCamelCase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(_lowercase ),
}
return response
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''The parameter bwt_string type must be str.''' )
if not bwt_string:
raise ValueError('''The parameter bwt_string must not be empty.''' )
try:
UpperCamelCase : Tuple = int(_lowercase )
except ValueError:
raise TypeError(
'''The parameter idx_original_string type must be int or passive'''
''' of cast to int.''' )
if idx_original_string < 0:
raise ValueError('''The parameter idx_original_string must not be lower than 0.''' )
if idx_original_string >= len(_lowercase ):
raise ValueError(
'''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' )
UpperCamelCase : str = [""] * len(_lowercase )
for _ in range(len(_lowercase ) ):
for i in range(len(_lowercase ) ):
UpperCamelCase : int = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
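# --- Added illustration, not part of the original file ---
# A hedged usage sketch of the intended behaviour, assuming the transform and its inverse
# keep the names used at their call sites below (bwt_transform, reverse_bwt).
def _demo_bwt_round_trip() -> None:
    result = bwt_transform("^BANANA")
    assert result["bwt_string"] == "BNN^AAA"
    assert result["idx_original_string"] == 6
    assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"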
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = '''Provide a string that I will generate its BWT transform: '''
__UpperCAmelCase : Optional[int] = input(entry_msg).strip()
__UpperCAmelCase : List[Any] = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
__UpperCAmelCase : Any = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
)
| 709
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__UpperCAmelCase : Optional[int] = 500000
__UpperCAmelCase , __UpperCAmelCase : Any = os.path.split(__file__)
__UpperCAmelCase : int = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase : Tuple = dataset.map(**SCREAMING_SNAKE_CASE_ )
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
UpperCamelCase : int = dataset.filter(**SCREAMING_SNAKE_CASE_ )
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[int] = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase : Dict = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
UpperCamelCase : List[str] = generate_example_dataset(
os.path.join(SCREAMING_SNAKE_CASE_ , '''dataset.arrow''' ) , SCREAMING_SNAKE_CASE_ , num_examples=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE_ )
def tokenize(SCREAMING_SNAKE_CASE_ : Dict ):
return tokenizer(examples['''text'''] )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''numpy''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''pandas''' ):
UpperCamelCase : int = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
UpperCamelCase : Dict = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , function=SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = filter(SCREAMING_SNAKE_CASE_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 643
| 0
|
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def lowercase (_snake_case = "" ) -> dict[str, float]:
'''simple docstring'''
__UpperCamelCase = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
__UpperCamelCase = BeautifulSoup(requests.get(_snake_case ).text ,"html.parser" )
__UpperCamelCase = soup.find_all("td" ,attrs="titleColumn" )
__UpperCamelCase = soup.find_all("td" ,class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_snake_case ,_snake_case )
}
def lowercase (_snake_case = "IMDb_Top_250_Movies.csv" ) -> None:
'''simple docstring'''
__UpperCamelCase = get_imdb_top_aaa_movies()
with open(_snake_case ,"w" ,newline="" ) as out_file:
__UpperCamelCase = csv.writer(_snake_case )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 505
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = KandinskyInpaintPipeline
_snake_case : int = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_snake_case : str = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_snake_case : Optional[int] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_snake_case : Optional[Any] = False
@property
def A ( self : int )-> Tuple:
return 32
@property
def A ( self : int )-> List[Any]:
return 32
@property
def A ( self : Dict )-> Tuple:
return self.time_input_dim
@property
def A ( self : Union[str, Any] )-> Tuple:
return self.time_input_dim * 4
@property
def A ( self : Dict )-> str:
return 1_00
@property
def A ( self : int )-> Dict:
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def A ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__UpperCamelCase = MultilingualCLIP(A_ )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def A ( self : int )-> str:
torch.manual_seed(0 )
__UpperCamelCase = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def A ( self : Optional[int] )-> Union[str, Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self : List[str] )-> Tuple:
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : str )-> List[Any]:
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
__UpperCamelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def A ( self : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[Any]=0 )-> Dict:
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((2_56, 2_56) )
# create mask
__UpperCamelCase = np.ones((64, 64) , dtype=np.floataa )
__UpperCamelCase = 0
if str(A_ ).startswith("mps" ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def A ( self : Optional[int] )-> Dict:
__UpperCamelCase = "cpu"
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def A ( self : Union[str, Any] )-> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : str )-> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any )-> str:
__UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
__UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__UpperCamelCase = np.ones((7_68, 7_68) , dtype=np.floataa )
__UpperCamelCase = 0
__UpperCamelCase = "a hat"
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
__UpperCamelCase = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
__UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase , __UpperCamelCase = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__UpperCamelCase = pipeline(
A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
__UpperCamelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(A_ , A_ )
| 505
| 1
|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Dict ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self._create_example_records()
_UpperCAmelCase = Dataset.from_list(snake_case_ )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(snake_case_ ):
self.assertDictEqual(snake_case_ , example_records[i] )
def lowercase ( self : str ):
_UpperCAmelCase = self._create_example_records()
_UpperCAmelCase = Dataset.from_list(snake_case_ )
_UpperCAmelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase ( self : str ): # checks what happens with missing columns
_UpperCAmelCase = [{"col_1": 1}, {"col_2": "x"}]
_UpperCAmelCase = Dataset.from_list(snake_case_ )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def lowercase ( self : List[Any] ): # checks if the type can be inferred from the second record
_UpperCAmelCase = [{"col_1": []}, {"col_1": [1, 2]}]
_UpperCAmelCase = Dataset.from_list(snake_case_ )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = Dataset.from_list([] )
self.assertEqual(len(snake_case_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
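
# --- Added illustration (a sketch, not part of the original test module): the
# missing-column behaviour exercised above, shown outside unittest. The assertions
# mirror the test expectations; only the helper name is new.
def _from_list_missing_columns_example() -> None:
    dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    assert dset[0] == {"col_1": 1}      # the first record defines the columns
    assert dset[1] == {"col_1": None}   # missing values are filled with None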
| 707
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger()
@dataclass
class A_ :
_lowerCamelCase : nn.Module
_lowerCamelCase : List[nn.Module] = field(default_factory=lowerCAmelCase_ )
_lowerCamelCase : list = field(default_factory=lowerCAmelCase_ )
def lowercase ( self : Dict , snake_case_ : Dict , snake_case_ : Tensor , snake_case_ : Tensor ):
_UpperCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(snake_case_ , nn.Convad ) or isinstance(snake_case_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(snake_case_ )
def __call__( self : str , snake_case_ : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case_ )
[x.remove() for x in self.handles]
return self
@property
def lowercase ( self : int ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda snake_case_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
_lowerCamelCase : nn.Module
_lowerCamelCase : nn.Module
_lowerCamelCase : int = 1
_lowerCamelCase : List = field(default_factory=lowerCAmelCase_ )
_lowerCamelCase : List = field(default_factory=lowerCAmelCase_ )
_lowerCamelCase : bool = True
def __call__( self : str , snake_case_ : Tensor ):
_UpperCAmelCase = Tracker(self.dest )(snake_case_ ).parametrized
_UpperCAmelCase = Tracker(self.src )(snake_case_ ).parametrized
_UpperCAmelCase = list(filter(lambda snake_case_ : type(snake_case_ ) not in self.src_skip , snake_case_ ) )
_UpperCAmelCase = list(filter(lambda snake_case_ : type(snake_case_ ) not in self.dest_skip , snake_case_ ) )
if len(snake_case_ ) != len(snake_case_ ) and self.raise_if_mismatch:
raise Exception(
f'Numbers of operations are different. Source module has {len(snake_case_ )} operations while'
f' destination module has {len(snake_case_ )}.' )
for dest_m, src_m in zip(snake_case_ , snake_case_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
class A_ ( nn.Module ):
def __init__( self : str , snake_case_ : nn.Module ):
super().__init__()
_UpperCAmelCase = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f'Unexpected layer name {k}'
_UpperCAmelCase = len(snake_case_ ) + 1
feature_blocks.append((f'res{block_index}', v) )
_UpperCAmelCase = nn.ModuleDict(snake_case_ )
def lowercase ( self : Optional[int] , snake_case_ : Tensor ):
return get_trunk_forward_outputs(
snake_case_ , out_feat_keys=snake_case_ , feature_blocks=self._feature_blocks , )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Any , snake_case_ : str ):
_UpperCAmelCase = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Any , snake_case_ : str ):
# default to timm!
if x not in self:
_UpperCAmelCase = self.convert_name_to_timm(snake_case_ )
_UpperCAmelCase = partial(lambda: (timm.create_model(snake_case_ , pretrained=snake_case_ ).eval(), None) )
else:
_UpperCAmelCase = super().__getitem__(snake_case_ )
return val
class A_ ( lowerCAmelCase_ ):
def __getitem__( self : Tuple , snake_case_ : str ):
if "seer" in x and "in1k" not in x:
_UpperCAmelCase = RegNetModel
else:
_UpperCAmelCase = RegNetForImageClassification
return val
def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : str , __lowercase : List[Tuple[str, str]] ) -> List[Any]:
'''simple docstring'''
for from_key, to_key in keys:
_UpperCAmelCase = from_state_dict[from_key].clone()
print(f'Copied key={from_key} to={to_key}' )
return to_state_dict
def UpperCAmelCase_ ( __lowercase : str , __lowercase : Callable[[], nn.Module] , __lowercase : Callable[[], nn.Module] , __lowercase : RegNetConfig , __lowercase : Path , __lowercase : bool = True , ) -> str:
'''simple docstring'''
print(f'Converting {name}...' )
with torch.no_grad():
_UpperCAmelCase , _UpperCAmelCase = from_model_func()
_UpperCAmelCase = our_model_func(__lowercase ).eval()
_UpperCAmelCase = ModuleTransfer(src=__lowercase , dest=__lowercase , raise_if_mismatch=__lowercase )
_UpperCAmelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__lowercase )
if from_state_dict is not None:
_UpperCAmelCase = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
_UpperCAmelCase = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
_UpperCAmelCase = manually_copy_vissl_head(__lowercase , our_model.state_dict() , __lowercase )
our_model.load_state_dict(__lowercase )
_UpperCAmelCase = our_model(__lowercase , output_hidden_states=__lowercase )
_UpperCAmelCase = (
our_outputs.logits if isinstance(__lowercase , __lowercase ) else our_outputs.last_hidden_state
)
_UpperCAmelCase = from_model(__lowercase )
_UpperCAmelCase = from_output[-1] if type(__lowercase ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
_UpperCAmelCase = our_outputs.hidden_states[-1]
assert torch.allclose(__lowercase , __lowercase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=__lowercase , )
_UpperCAmelCase = 224 if "seer" not in name else 384
# we can use the convnext one
_UpperCAmelCase = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=__lowercase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=__lowercase , )
print(f'Pushed {name}' )
def UpperCAmelCase_ ( __lowercase : Path , __lowercase : str = None , __lowercase : bool = True ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = "imagenet-1k-id2label.json"
_UpperCAmelCase = 1000
_UpperCAmelCase = (1, num_labels)
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = num_labels
_UpperCAmelCase = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type="dataset" ) ) , "r" ) )
_UpperCAmelCase = {int(__lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase )
_UpperCAmelCase = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
_UpperCAmelCase = NameToOurModelFuncMap()
_UpperCAmelCase = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__lowercase : str , __lowercase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
_UpperCAmelCase = torch.hub.load_state_dict_from_url(__lowercase , model_dir=str(__lowercase ) , map_location="cpu" )
_UpperCAmelCase = model_func()
# check if we have a head, if yes add it
_UpperCAmelCase = files["classy_state_dict"]["base_model"]["model"]
_UpperCAmelCase = model_state_dict["trunk"]
model.load_state_dict(__lowercase )
return model.eval(), model_state_dict["heads"]
# pretrained
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __lowercase , __lowercase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __lowercase , __lowercase , __lowercase , )
return config, expected_shape
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__SCREAMING_SNAKE_CASE :Any = parser.parse_args()
__SCREAMING_SNAKE_CASE :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 119
| 0
|
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str, model_name: str) -> None:
    """Generate and write a README.md model card for one allenai wmt16 checkpoint."""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
_a : Optional[int] = f"""{src_lang}-{tgt_lang}"""
_a : int = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 229
|
'''simple docstring'''
import operator as op
def solve(post_fix):
    """Evaluate a postfix (reverse Polish) expression, printing each stack step as a table."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])
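
# --- Added worked example (not part of the original file; the helper name is new):
# evaluating "5 6 9 * +" should give 59, since 6 * 9 = 54 and 5 + 54 = 59.
# solve() also prints the intermediate stack states as a table while it runs.
def _postfix_example() -> None:
    assert solve("5 6 9 * +".split(" ")) == 59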
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 229
| 1
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowerCamelCase = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """One worker in the odd-even transposition network: repeatedly exchanges its value with its neighbors."""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort arr by spawning one process per element and letting neighbors exchange values."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())

    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """Sort a reversed list of 10 numbers and print it before and after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
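
# --- Added reference implementation (a sketch, not part of the original file):
# a sequential odd-even transposition sort, handy for checking the parallel
# version's output on small inputs. The helper name is new.
def odd_even_transposition_sequential(values: list) -> list:
    arr = list(values)
    for round_number in range(len(arr)):
        start = round_number % 2  # alternate between even- and odd-indexed pairs
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr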
if __name__ == "__main__":
main()
| 447
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
lowerCAmelCase = """altclip_text_model"""
def __init__( self : Union[str, Any] , UpperCamelCase : List[Any]=2_5_0_0_0_2 , UpperCamelCase : int=1_0_2_4 , UpperCamelCase : Dict=2_4 , UpperCamelCase : Dict=1_6 , UpperCamelCase : Dict=4_0_9_6 , UpperCamelCase : List[str]="gelu" , UpperCamelCase : Any=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Union[str, Any]=5_1_4 , UpperCamelCase : List[Any]=1 , UpperCamelCase : Any=0.0_2 , UpperCamelCase : Any=0.0_2 , UpperCamelCase : Union[str, Any]=1E-05 , UpperCamelCase : List[str]=1 , UpperCamelCase : Dict=0 , UpperCamelCase : Tuple=2 , UpperCamelCase : Union[str, Any]="absolute" , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=7_6_8 , **UpperCamelCase : str , )->Any:
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Any = num_attention_heads
__SCREAMING_SNAKE_CASE : List[Any] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
__SCREAMING_SNAKE_CASE : Any = initializer_range
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
__SCREAMING_SNAKE_CASE : List[str] = use_cache
__SCREAMING_SNAKE_CASE : Any = project_dim
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
lowerCAmelCase = """altclip_vision_model"""
def __init__( self : Dict , UpperCamelCase : Tuple=7_6_8 , UpperCamelCase : Optional[int]=3_0_7_2 , UpperCamelCase : Optional[Any]=5_1_2 , UpperCamelCase : int=1_2 , UpperCamelCase : List[Any]=1_2 , UpperCamelCase : List[str]=3 , UpperCamelCase : List[Any]=2_2_4 , UpperCamelCase : int=3_2 , UpperCamelCase : Optional[Any]="quick_gelu" , UpperCamelCase : List[Any]=1E-5 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Tuple=0.0_2 , UpperCamelCase : str=1.0 , **UpperCamelCase : Tuple , )->Optional[int]:
super().__init__(**UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Dict = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[int] = projection_dim
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : str = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
__SCREAMING_SNAKE_CASE : str = image_size
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : Tuple = attention_dropout
__SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
@classmethod
def __snake_case ( cls : Union[str, Any] , UpperCamelCase : Union[str, os.PathLike] , **UpperCamelCase : Optional[Any] )->"PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = cls.get_config_dict(UpperCamelCase , **UpperCamelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
__SCREAMING_SNAKE_CASE : List[str] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase , **UpperCamelCase )
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
lowerCAmelCase = """altclip"""
lowerCAmelCase = True
def __init__( self : Optional[Any] , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=None , UpperCamelCase : Union[str, Any]=7_6_8 , UpperCamelCase : Optional[Any]=2.6_5_9_2 , **UpperCamelCase : int )->Any:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
__SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("text_config_dict" , UpperCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("vision_config_dict" , UpperCamelCase )
super().__init__(**UpperCamelCase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__SCREAMING_SNAKE_CASE : Tuple = {}
# This is the complete result when using `text_config_dict`.
__SCREAMING_SNAKE_CASE : Optional[Any] = AltCLIPTextConfig(**UpperCamelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__SCREAMING_SNAKE_CASE : List[str] = (
F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
F"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
F"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(UpperCamelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
__SCREAMING_SNAKE_CASE : int = {}
# This is the complete result when using `vision_config_dict`.
__SCREAMING_SNAKE_CASE : Optional[int] = AltCLIPVisionConfig(**UpperCamelCase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
__SCREAMING_SNAKE_CASE : List[Any] = {
str(UpperCamelCase ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
__SCREAMING_SNAKE_CASE : Dict = (
F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
__SCREAMING_SNAKE_CASE : Dict = (
F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
F"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(UpperCamelCase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
__SCREAMING_SNAKE_CASE : List[Any] = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
__SCREAMING_SNAKE_CASE : List[str] = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
__SCREAMING_SNAKE_CASE : Tuple = AltCLIPTextConfig(**UpperCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = AltCLIPVisionConfig(**UpperCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = projection_dim
__SCREAMING_SNAKE_CASE : int = logit_scale_init_value
__SCREAMING_SNAKE_CASE : List[Any] = 1.0
@classmethod
def __snake_case ( cls : Dict , UpperCamelCase : AltCLIPTextConfig , UpperCamelCase : AltCLIPVisionConfig , **UpperCamelCase : int )->Dict:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase )
    def to_dict(self):
        """Serialize this config, replacing the sub-configs with their dict form."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 447
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = "▁"
UpperCAmelCase__ : Dict = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ : int = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
UpperCAmelCase__ : Dict = {
"facebook/xglm-564M": 20_48,
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :List[Any] = VOCAB_FILES_NAMES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Tuple = ['input_ids', 'attention_mask']
def __init__( self : int , __magic_name__ : str , __magic_name__ : Optional[int]="<s>" , __magic_name__ : int="</s>" , __magic_name__ : Optional[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : List[str]="<unk>" , __magic_name__ : Optional[int]="<pad>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Optional[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCAmelCase__ = 7
lowerCAmelCase__ = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCAmelCase__ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
if token_ids_a is None:
return [1] + ([0] * len(__magic_name__ ))
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ ))
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
lowerCAmelCase__ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Dict ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Tuple ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = "".join(__magic_name__ ).replace(__magic_name__ , " " ).strip()
return out_string
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
| 48
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "dpt"
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=3_8_4 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=False , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=[2, 5, 8, 1_1] , _UpperCamelCase : List[str]="project" , _UpperCamelCase : Optional[int]=[4, 2, 1, 0.5] , _UpperCamelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , _UpperCamelCase : Dict=2_5_6 , _UpperCamelCase : Optional[Any]=-1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=0.4 , _UpperCamelCase : Tuple=2_5_5 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=[1, 1_0_2_4, 2_4, 2_4] , _UpperCamelCase : List[str]=[0, 1] , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : Dict , ) ->Any:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
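# Usage sketch for the configuration defined above. The names below are the upstream
# `transformers.DPTConfig` equivalents of this obfuscated class, so treat the example as
# illustrative rather than as code that belongs to this file:
#   >>> from transformers import DPTConfig
#   >>> plain = DPTConfig()                 # plain ViT backbone, readout_type="project"
#   >>> hybrid = DPTConfig(is_hybrid=True)  # falls back to the default BiT backbone dict above
#   >>> hybrid.backbone_config is not None
#   True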
| 39
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
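# Usage sketch of the `_LazyModule` wiring above: importing the package itself is cheap, and
# the heavy backend (torch / TF / flax) is only imported once a symbol from the matching
# sub-module is first resolved. Assumes the standard public `transformers` package:
#   >>> from transformers import ElectraConfig, ElectraModel   # torch backend loaded lazily here
#   >>> model = ElectraModel(ElectraConfig())                   # randomly initialised model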
| 525
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main():
    """Close or ping stale issues that the bot has already commented on."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        # newest comment first, so comments[0] is the latest activity on the issue
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 525
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
snake_case_ = True if '''large''' in model_name or '''huge''' in model_name else False
snake_case_ = True if '''large''' in model_name or '''huge''' in model_name else False
snake_case_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
snake_case_ = [3, 3, 3, 3]
snake_case_ = [5, 5, 5, 5]
elif "fl4" in model_name:
snake_case_ = [4, 4, 4, 4]
snake_case_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
snake_case_ = [3, 3, 3, 3]
if "lrf" in model_name:
snake_case_ = [3, 3, 3, 3]
else:
snake_case_ = [2, 2, 2, 2]
if "tiny" in model_name:
snake_case_ = 96
elif "small" in model_name:
snake_case_ = 96
elif "base" in model_name:
snake_case_ = 128
elif "large" in model_name:
snake_case_ = 192
elif "xlarge" in model_name:
snake_case_ = 256
elif "huge" in model_name:
snake_case_ = 352
# set label information
snake_case_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
snake_case_ = '''imagenet-22k-id2label.json'''
else:
snake_case_ = '''imagenet-1k-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = FocalNetConfig(
embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , focal_levels=SCREAMING_SNAKE_CASE__ , focal_windows=SCREAMING_SNAKE_CASE__ , use_conv_embed=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , use_post_layernorm=SCREAMING_SNAKE_CASE__ , use_layerscale=SCREAMING_SNAKE_CASE__ , )
return config
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if "patch_embed.proj" in name:
snake_case_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
snake_case_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
snake_case_ = '''encoder.''' + name
if "encoder.layers" in name:
snake_case_ = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
snake_case_ = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
snake_case_ = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
snake_case_ = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
snake_case_ = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
snake_case_ = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
snake_case_ = '''layernorm.weight'''
if name == "norm.bias":
snake_case_ = '''layernorm.bias'''
if "head" in name:
snake_case_ = name.replace('''head''' , '''classifier''' )
else:
snake_case_ = '''focalnet.''' + name
return name
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
# fmt: off
snake_case_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
snake_case_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ = val
snake_case_ = get_focalnet_config(SCREAMING_SNAKE_CASE__ )
snake_case_ = FocalNetForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
# load state dict
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify conversion
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = BitImageProcessor(
do_resize=SCREAMING_SNAKE_CASE__ , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE__ , image_mean=SCREAMING_SNAKE_CASE__ , image_std=SCREAMING_SNAKE_CASE__ , )
snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
snake_case_ = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
snake_case_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
snake_case_ = image_transforms(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
snake_case_ = model(**SCREAMING_SNAKE_CASE__ )
snake_case_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
snake_case_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
snake_case_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
snake_case_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
snake_case_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
snake_case_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
snake_case_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
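# Example invocation of this conversion script (the file name and paths are illustrative only):
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub
# The checkpoint is fetched from the URL table above, keys are renamed, and the logits are
# compared against the hard-coded slices before anything is saved or pushed.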
| 39
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__a = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__a = direct_transformers_import(PATH_TO_TRANSFORMERS)
__a = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__a = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__a = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __lowercase ( _UpperCamelCase ) ->Any:
"""simple docstring"""
lowercase : Tuple = None
# source code of `config_class`
lowercase : Dict = inspect.getsource(_UpperCamelCase )
lowercase : List[str] = _re_checkpoint.findall(_UpperCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowercase : List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowercase : List[str] = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowercase : Dict = ckpt_name
break
return checkpoint
def __lowercase ( ) ->str:
"""simple docstring"""
lowercase : str = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowercase : Optional[int] = get_checkpoint_from_config_class(_UpperCamelCase )
lowercase : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
lowercase : Any = '''\n'''.join(sorted(_UpperCamelCase ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
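# Illustration of what the checkpoint regex above (used as `_re_checkpoint`) captures from a
# config docstring:
#   >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]
# A pair is accepted only when the link is exactly https://huggingface.co/<name>, with an
# optional trailing slash.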
| 319
| 0
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_A = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> Optional[int]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> Dict:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowercase , id=_lowercase )
| 712
|
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
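# Worked example of one gradient-descent step inside `logistic_reg` (toy numbers):
#   theta = [0, 0], x = [[1, 2]], y = [1], alpha = 0.1
#   z = x @ theta = 0                  -> h = sigmoid_function(0) = 0.5
#   gradient = x.T @ (h - y) / y.size  =  [-0.5, -1.0]
#   theta <- theta - alpha * gradient  =  [0.05, 0.10]
# Repeating this update is exactly what the training loop above does for max_iterations steps.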
| 538
| 0
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
def lowerCAmelCase_ ( self: str ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self: Any ) -> Dict:
snake_case__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe.dual_guided(
prompt='first prompt' , image=UpperCamelCase , text_to_image_strength=0.75 , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase )
snake_case__ = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ = generator.manual_seed(0 )
snake_case__ = pipe.dual_guided(
prompt='first prompt' , image=UpperCamelCase , text_to_image_strength=0.75 , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowerCAmelCase_ ( self: List[str] ) -> str:
snake_case__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ = 'cyberpunk 2077'
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe.dual_guided(
prompt=UpperCamelCase , image=UpperCamelCase , text_to_image_strength=0.75 , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
snake_case__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
snake_case__ = 'A painting of a squirrel eating a burger '
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe.text_to_image(
prompt=UpperCamelCase , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
snake_case__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
snake_case__ = pipe.image_variation(UpperCamelCase , generator=UpperCamelCase , output_type='numpy' ).images
snake_case__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 328
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( __snake_case ):
__lowerCAmelCase : Tuple = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
__lowerCAmelCase : Optional[int] = '''CIDAS/clipseg-rd64-refined'''
__lowerCAmelCase : Optional[Any] = '''image_segmenter'''
__lowerCAmelCase : int = CLIPSegForImageSegmentation
__lowerCAmelCase : List[str] = ['''image''', '''text''']
__lowerCAmelCase : Union[str, Any] = ['''image''']
def __init__( self : List[Any] , *a : Optional[int] , **a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*a , **a )
def _lowerCAmelCase ( self : Optional[int] , a : "Image" , a : str ) -> int:
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=a , return_tensors='''pt''' )
def _lowerCAmelCase ( self : int , a : Dict ) -> Dict:
"""simple docstring"""
with torch.no_grad():
lowercase = self.model(**a ).logits
return logits
def _lowerCAmelCase ( self : Union[str, Any] , a : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase = outputs.cpu().detach().numpy()
lowercase = 0
lowercase = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
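# Hypothetical usage sketch for the tool above (the class is named ImageSegmentationTool in
# the upstream library; the calling convention shown here is an assumption, not verified):
#   >>> from PIL import Image
#   >>> tool = ImageSegmentationTool()
#   >>> mask = tool(image=Image.open("cat.png"), label="cat")  # runs encode -> forward -> decode
#   >>> mask    # a PIL image holding the thresholded segmentation mask scaled to 0-255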
| 396
|
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
__lowerCAmelCase = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
__lowerCAmelCase = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
__lowerCAmelCase = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 396
| 1
|
'''simple docstring'''
from string import ascii_uppercase
SCREAMING_SNAKE_CASE = {char: i for i, char in enumerate(ascii_uppercase)}
SCREAMING_SNAKE_CASE = dict(enumerate(ascii_uppercase))
def lowercase_ ( __A : str , __A : str ) -> str:
"""simple docstring"""
lowercase : Union[str, Any] =len(__A )
lowercase : Dict =0
while True:
if x == i:
lowercase : Dict =0
if len(__A ) == len(__A ):
break
key += key[i]
i += 1
return key
def lowercase_ ( __A : str , __A : str ) -> str:
"""simple docstring"""
lowercase : Optional[int] =''''''
lowercase : List[Any] =0
for letter in message:
if letter == " ":
cipher_text += " "
else:
lowercase : Optional[Any] =(dicta[letter] - dicta[key_new[i]]) % 2_6
i += 1
cipher_text += dicta[x]
return cipher_text
def lowercase_ ( __A : str , __A : str ) -> str:
"""simple docstring"""
lowercase : int =''''''
lowercase : List[Any] =0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
lowercase : int =(dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6
i += 1
or_txt += dicta[x]
return or_txt
def lowercase_ ( ) -> None:
"""simple docstring"""
lowercase : Any ='''THE GERMAN ATTACK'''
lowercase : Optional[Any] ='''SECRET'''
lowercase : List[str] =generate_key(__A , __A )
lowercase : Optional[int] =cipher_text(__A , __A )
print(F'Encrypted Text = {s}' )
print(F'Original Text = {original_text(__A , __A )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
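# Worked trace of the scheme above (descriptive only, since the helper names in this snippet
# are obfuscated): with message "THE GERMAN ATTACK" and key "SECRET", generate_key repeats the
# key until it matches the message length, giving "SECRETSECRETSECRE"; each letter is then
# encrypted as (message_letter - key_letter) mod 26 with spaces passed through unchanged, and
# decrypted as (cipher_letter + key_letter + 26) mod 26.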
| 94
|
'''simple docstring'''
from math import isqrt
def lowercase_ ( __A : int ) -> list[int]:
"""simple docstring"""
lowercase : Dict =[True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __A , __A ):
lowercase : str =False
return [i for i in range(2 , __A ) if is_prime[i]]
def lowercase_ ( __A : int = 1_0**8 ) -> int:
"""simple docstring"""
lowercase : Dict =calculate_prime_numbers(max_number // 2 )
lowercase : str =0
lowercase : Optional[Any] =0
lowercase : Union[str, Any] =len(__A ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 94
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = BertTokenizer
UpperCAmelCase__ = BertTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = filter_non_english
def snake_case ( self : List[str] ):
"""simple docstring"""
super().setUp()
_lowercase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def snake_case ( self : Dict , __A : int ):
"""simple docstring"""
_lowercase = "UNwant\u00E9d,running"
_lowercase = "unwanted, running"
return input_text, output_text
def snake_case ( self : Tuple ):
"""simple docstring"""
_lowercase = self.tokenizer_class(self.vocab_file )
_lowercase = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__A , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def snake_case ( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowercase = self.get_tokenizer()
_lowercase = self.get_rust_tokenizer()
_lowercase = "UNwant\u00E9d,running"
_lowercase = tokenizer.tokenize(__A )
_lowercase = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
_lowercase = tokenizer.encode(__A , add_special_tokens=__A )
_lowercase = rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
_lowercase = self.get_rust_tokenizer()
_lowercase = tokenizer.encode(__A )
_lowercase = rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
# With lower casing
_lowercase = self.get_tokenizer(do_lower_case=__A )
_lowercase = self.get_rust_tokenizer(do_lower_case=__A )
_lowercase = "UNwant\u00E9d,running"
_lowercase = tokenizer.tokenize(__A )
_lowercase = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
_lowercase = tokenizer.encode(__A , add_special_tokens=__A )
_lowercase = rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
_lowercase = self.get_rust_tokenizer()
_lowercase = tokenizer.encode(__A )
_lowercase = rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = BasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def snake_case ( self : Any ):
"""simple docstring"""
_lowercase = BasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def snake_case ( self : Any ):
"""simple docstring"""
_lowercase = BasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def snake_case ( self : int ):
"""simple docstring"""
_lowercase = BasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = BasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
_lowercase = BasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def snake_case ( self : Any ):
"""simple docstring"""
_lowercase = BasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = BasicTokenizer(do_lower_case=__A , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = BasicTokenizer()
_lowercase = "a\n'll !!to?'d of, can't."
_lowercase = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(__A ) , __A )
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_lowercase = {}
for i, token in enumerate(__A ):
_lowercase = i
_lowercase = WordpieceTokenizer(vocab=__A , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def snake_case ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def snake_case ( self : Any ):
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def snake_case ( self : Tuple ):
"""simple docstring"""
_lowercase = self.get_tokenizer()
_lowercase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__A ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(__A ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def snake_case ( self : Any ):
"""simple docstring"""
_lowercase = self.tokenizer_class.from_pretrained("bert-base-uncased" )
_lowercase = tokenizer.encode("sequence builders" , add_special_tokens=__A )
_lowercase = tokenizer.encode("multi-sequence build" , add_special_tokens=__A )
_lowercase = tokenizer.build_inputs_with_special_tokens(__A )
_lowercase = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def snake_case ( self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase = self.rust_tokenizer_class.from_pretrained(__A , **__A )
_lowercase = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_lowercase = tokenizer_r.encode_plus(
__A , return_attention_mask=__A , return_token_type_ids=__A , return_offsets_mapping=__A , add_special_tokens=__A , )
_lowercase = tokenizer_r.do_lower_case if hasattr(__A , "do_lower_case" ) else False
_lowercase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = ["的", "人", "有"]
_lowercase = "".join(__A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase = True
_lowercase = self.tokenizer_class.from_pretrained(__A , **__A )
_lowercase = self.rust_tokenizer_class.from_pretrained(__A , **__A )
_lowercase = tokenizer_p.encode(__A , add_special_tokens=__A )
_lowercase = tokenizer_r.encode(__A , add_special_tokens=__A )
_lowercase = tokenizer_r.convert_ids_to_tokens(__A )
_lowercase = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
_lowercase = False
_lowercase = self.rust_tokenizer_class.from_pretrained(__A , **__A )
_lowercase = self.tokenizer_class.from_pretrained(__A , **__A )
_lowercase = tokenizer_r.encode(__A , add_special_tokens=__A )
_lowercase = tokenizer_p.encode(__A , add_special_tokens=__A )
_lowercase = tokenizer_r.convert_ids_to_tokens(__A )
_lowercase = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that only the first Chinese character is not preceded by "##".
_lowercase = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(__A )
]
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
| 602
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__magic_name__ : List[Any] = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str = ['''BeitFeatureExtractor''']
__magic_name__ : int = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[Any] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[Any] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__magic_name__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 602
| 1
|
def solution(n: int = 1000) -> int:
    """Sum 2 * a * ((a - 1) // 2) for a from 3 to n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 445
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
snake_case : Dict = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
snake_case : str = concatenate_datasets
snake_case : Any = DownloadConfig
snake_case : int = DownloadManager
snake_case : int = DownloadMode
snake_case : List[Any] = DownloadConfig
snake_case : List[str] = DownloadMode
snake_case : int = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
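# Minimal usage sketch of the public API re-exported above (standard `datasets` entry points,
# assuming access to the Hugging Face Hub; nothing below is defined in this file):
#   >>> from datasets import load_dataset
#   >>> ds = load_dataset("glue", "mrpc", split="train")
#   >>> ds[0]["sentence1"]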
| 605
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase:List[Any] = AutoConfig.from_pretrained(a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Optional[int] = TFAutoModel.from_pretrained(a__ ,from_pt=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:List[Any] = AutoModel.from_pretrained(a__ ,from_tf=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
@slow
def __UpperCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase:Optional[Any] = AutoConfig.from_pretrained(a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:List[Any] = TFAutoModelForPreTraining.from_pretrained(a__ ,from_pt=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Optional[int] = AutoModelForPreTraining.from_pretrained(a__ ,from_tf=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
@slow
def __UpperCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase:Dict = AutoConfig.from_pretrained(a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(a__ ,from_pt=a__)
_lowerCAmelCase , _lowerCAmelCase:Any = TFAutoModelForCausalLM.from_pretrained(
a__ ,output_loading_info=a__ ,from_pt=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Union[str, Any] = AutoModelForCausalLM.from_pretrained(a__ ,from_tf=a__)
_lowerCAmelCase , _lowerCAmelCase:Any = AutoModelForCausalLM.from_pretrained(
a__ ,output_loading_info=a__ ,from_tf=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
@slow
def __UpperCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase:Optional[int] = AutoConfig.from_pretrained(a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Optional[Any] = TFAutoModelWithLMHead.from_pretrained(a__ ,from_pt=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Tuple = AutoModelWithLMHead.from_pretrained(a__ ,from_tf=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
@slow
def __UpperCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase:str = AutoConfig.from_pretrained(a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:int = TFAutoModelForMaskedLM.from_pretrained(a__ ,from_pt=a__)
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(
a__ ,output_loading_info=a__ ,from_pt=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Optional[int] = AutoModelForMaskedLM.from_pretrained(a__ ,from_tf=a__)
_lowerCAmelCase , _lowerCAmelCase:Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
a__ ,output_loading_info=a__ ,from_tf=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
@slow
def __UpperCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase:Optional[int] = AutoConfig.from_pretrained(a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:int = TFAutoModelForSeqaSeqLM.from_pretrained(a__ ,from_pt=a__)
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
a__ ,output_loading_info=a__ ,from_pt=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Any = AutoModelForSeqaSeqLM.from_pretrained(a__ ,from_tf=a__)
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(
a__ ,output_loading_info=a__ ,from_tf=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
@slow
def __UpperCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase:Optional[int] = AutoConfig.from_pretrained(a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(a__ ,from_pt=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:List[str] = AutoModelForSequenceClassification.from_pretrained(a__ ,from_tf=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
@slow
def __UpperCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase:Union[str, Any] = AutoConfig.from_pretrained(a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:Dict = TFAutoModelForQuestionAnswering.from_pretrained(a__ ,from_pt=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
_lowerCAmelCase:str = AutoModelForQuestionAnswering.from_pretrained(a__ ,from_tf=a__)
self.assertIsNotNone(a__)
self.assertIsInstance(a__ ,a__)
def __UpperCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Dict = TFAutoModelWithLMHead.from_pretrained(a__ ,from_pt=a__)
self.assertIsInstance(a__ ,a__)
self.assertEqual(model.num_parameters() ,1_4410)
self.assertEqual(model.num_parameters(only_trainable=a__) ,1_4410)
_lowerCAmelCase:Optional[Any] = AutoModelWithLMHead.from_pretrained(a__ ,from_tf=a__)
self.assertIsInstance(a__ ,a__)
self.assertEqual(model.num_parameters() ,1_4410)
self.assertEqual(model.num_parameters(only_trainable=a__) ,1_4410)
def __UpperCamelCase ( self : str) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = TFAutoModelWithLMHead.from_pretrained(a__ ,from_pt=a__)
self.assertIsInstance(a__ ,a__)
self.assertEqual(model.num_parameters() ,1_4410)
self.assertEqual(model.num_parameters(only_trainable=a__) ,1_4410)
_lowerCAmelCase:Optional[int] = AutoModelWithLMHead.from_pretrained(a__ ,from_tf=a__)
self.assertIsInstance(a__ ,a__)
self.assertEqual(model.num_parameters() ,1_4410)
self.assertEqual(model.num_parameters(only_trainable=a__) ,1_4410)
| 439
|
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    # encode the text as UTF-8 bytes, then Ascii85-encode those bytes
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # Ascii85-decode the bytes and turn the result back into text
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
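# Round-trip sketch for the helpers above (the encoded value was worked out by hand from the
# Ascii85 alphabet, so treat it as illustrative):
#   >>> base85_encode("hello")
#   b'BOu!rDZ'
#   >>> base85_decode(b"BOu!rDZ")
#   'hello'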
| 439
| 1
|
def least_divisible_repunit(divisor: int) -> int:
    # A(d): length of the shortest repunit 1, 11, 111, ... divisible by d
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    # least value of n (odd, checked in steps of 2) for which A(n) first exceeds the limit
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'{solution() = }')
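# Quick sanity check of least_divisible_repunit by hand: for divisor = 7 the repunit residues
# 1, 11, 111, ... mod 7 run 1, 4, 6, 5, 2, 0, so the six-digit repunit 111111 is the first one
# divisible by 7 and the function returns 6.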
| 194
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 209
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""Mask2FormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mask2former"""] = [
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 714
|
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class snake_case_ ( TokenizerTesterMixin ,unittest.TestCase ):
__lowerCAmelCase = GPTSwaTokenizer
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = False
def snake_case_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
a_ : Optional[int] = GPTSwaTokenizer(a_ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self , a_ ):
a_ : Union[str, Any] = "This is a test"
a_ : Tuple = "This is a test"
return input_text, output_text
def snake_case_ ( self ):
a_ : List[str] = "<s>"
a_ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def snake_case_ ( self ):
a_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(a_ ) , 2_0_0_0 )
def snake_case_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def snake_case_ ( self ):
a_ : Union[str, Any] = GPTSwaTokenizer(a_ )
a_ : str = tokenizer.tokenize("This is a test" )
self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
a_ : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
a_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
a_ : List[str] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(a_ )
# fmt: off
self.assertListEqual(
a_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def snake_case_ ( self ):
a_ : List[str] = GPTSwaTokenizer(a_ )
a_ : List[Any] = ["This is a test", "I was born in 92000, and this is falsé."]
a_ : Optional[Any] = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(a_ , a_ ):
self.assertListEqual(tokenizer.encode_fast(a_ ) , a_ )
# Test that decode_fast returns the input text
for text, token_ids in zip(a_ , a_ ):
self.assertEqual(tokenizer.decode_fast(a_ ) , a_ )
@slow
def snake_case_ ( self ):
a_ : Dict = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
a_ : Union[str, Any] = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=a_ , )
| 370
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__UpperCAmelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Optional[int] = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase : Union[str, Any] = {
"unc-nlp/lxmert-base-uncased": 5_1_2,
}
__UpperCAmelCase : int = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = LxmertTokenizer
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=True ,UpperCamelCase="[UNK]" ,UpperCamelCase="[SEP]" ,UpperCamelCase="[PAD]" ,UpperCamelCase="[CLS]" ,UpperCamelCase="[MASK]" ,UpperCamelCase=True ,UpperCamelCase=None ,**UpperCamelCase ,) -> int:
super().__init__(
UpperCamelCase ,tokenizer_file=UpperCamelCase ,do_lower_case=UpperCamelCase ,unk_token=UpperCamelCase ,sep_token=UpperCamelCase ,pad_token=UpperCamelCase ,cls_token=UpperCamelCase ,mask_token=UpperCamelCase ,tokenize_chinese_chars=UpperCamelCase ,strip_accents=UpperCamelCase ,**UpperCamelCase ,)
snake_case__ :List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" ,UpperCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" ,UpperCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" ,UpperCamelCase ) != tokenize_chinese_chars
):
snake_case__ :List[Any] = getattr(UpperCamelCase ,normalizer_state.pop("type" ) )
snake_case__ :Dict = do_lower_case
snake_case__ :Union[str, Any] = strip_accents
snake_case__ :Optional[Any] = tokenize_chinese_chars
snake_case__ :List[str] = normalizer_class(**UpperCamelCase )
snake_case__ :Optional[int] = do_lower_case
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=None ) -> Tuple:
snake_case__ :Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Any = [self.sep_token_id]
snake_case__ :Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
snake_case__ :Dict = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase )
return tuple(UpperCamelCase )
| 241
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( _A , unittest.TestCase ):
_A = DDIMPipeline
_A = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_A = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
_A = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_A = False
def lowerCAmelCase_ ( self ) -> Any:
torch.manual_seed(0 )
snake_case__ :List[str] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("DownBlock2D", "AttnDownBlock2D") ,up_block_types=("AttnUpBlock2D", "UpBlock2D") ,)
snake_case__ :int = DDIMScheduler()
snake_case__ :List[Any] = {"unet": unet, "scheduler": scheduler}
return components
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=0 ) -> Optional[Any]:
if str(UpperCamelCase ).startswith("mps" ):
snake_case__ :Dict = torch.manual_seed(UpperCamelCase )
else:
snake_case__ :Union[str, Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
snake_case__ :List[str] = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Tuple = "cpu"
snake_case__ :int = self.get_dummy_components()
snake_case__ :str = self.pipeline_class(**UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Optional[Any] = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :Dict = pipe(**UpperCamelCase ).images
snake_case__ :Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 32, 32, 3) )
snake_case__ :Tuple = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
snake_case__ :Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase ,1E-3 )
def lowerCAmelCase_ ( self ) -> Any:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCAmelCase_ ( self ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3E-3 )
def lowerCAmelCase_ ( self ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def lowerCAmelCase_ ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[int] = "google/ddpm-cifar10-32"
snake_case__ :int = UNetaDModel.from_pretrained(UpperCamelCase )
snake_case__ :List[str] = DDIMScheduler()
snake_case__ :Any = DDIMPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
ddim.to(UpperCamelCase )
ddim.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Dict = torch.manual_seed(0 )
snake_case__ :Optional[Any] = ddim(generator=UpperCamelCase ,eta=0.0 ,output_type="numpy" ).images
snake_case__ :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ :str = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :int = "google/ddpm-ema-bedroom-256"
snake_case__ :Tuple = UNetaDModel.from_pretrained(UpperCamelCase )
snake_case__ :int = DDIMScheduler.from_pretrained(UpperCamelCase )
snake_case__ :Union[str, Any] = DDIMPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
ddpm.to(UpperCamelCase )
ddpm.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :int = torch.manual_seed(0 )
snake_case__ :Optional[int] = ddpm(generator=UpperCamelCase ,output_type="numpy" ).images
snake_case__ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case__ :Optional[int] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 241
| 1
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_a : Optional[Any]= (
"This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
warnings.warn(UpperCAmelCase_ , UpperCAmelCase_ )
requires_backends(UpperCAmelCase_ , 'sklearn' )
return (preds == labels).mean()
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ) -> str:
'''simple docstring'''
warnings.warn(UpperCAmelCase_ , UpperCAmelCase_ )
requires_backends(UpperCAmelCase_ , 'sklearn' )
__snake_case : List[str] = simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )
__snake_case : Union[str, Any] = fa_score(y_true=UpperCAmelCase_ , y_pred=UpperCAmelCase_ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __UpperCAmelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any ) -> Any:
'''simple docstring'''
warnings.warn(UpperCAmelCase_ , UpperCAmelCase_ )
requires_backends(UpperCAmelCase_ , 'sklearn' )
__snake_case : List[Any] = pearsonr(UpperCAmelCase_ , UpperCAmelCase_ )[0]
__snake_case : Any = spearmanr(UpperCAmelCase_ , UpperCAmelCase_ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __UpperCAmelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
warnings.warn(UpperCAmelCase_ , UpperCAmelCase_ )
requires_backends(UpperCAmelCase_ , 'sklearn' )
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ), F"Predictions and labels have mismatched lengths {len(UpperCAmelCase_ )} and {len(UpperCAmelCase_ )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(UpperCAmelCase_ , UpperCAmelCase_ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
elif task_name == "mrpc":
return acc_and_fa(UpperCAmelCase_ , UpperCAmelCase_ )
elif task_name == "sts-b":
return pearson_and_spearman(UpperCAmelCase_ , UpperCAmelCase_ )
elif task_name == "qqp":
return acc_and_fa(UpperCAmelCase_ , UpperCAmelCase_ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
elif task_name == "rte":
return {"acc": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
elif task_name == "hans":
return {"acc": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
else:
raise KeyError(UpperCAmelCase_ )
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
warnings.warn(UpperCAmelCase_ , UpperCAmelCase_ )
requires_backends(UpperCAmelCase_ , 'sklearn' )
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(F"Predictions and labels have mismatched lengths {len(UpperCAmelCase_ )} and {len(UpperCAmelCase_ )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
else:
raise KeyError(UpperCAmelCase_ )
| 710
|
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    '''simple docstring'''
    # Jump search over a sorted list: probe in blocks of about sqrt(n), then scan linearly inside the block.
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f'''Number {x} is at index {res}''')
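# Illustrative trace (not part of the original script): jump_search([0, 1, 3, 5, 8, 13], 5)
# probes in blocks of floor(sqrt(6)) = 2, stops at the block whose last element is 5,
# then walks forward from index 2 and returns 3, the position of 5.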
| 192
| 0
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 560
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = "roformer"
def __init__( self : Any ,_snake_case : str=50_000 ,_snake_case : int=None ,_snake_case : int=768 ,_snake_case : Tuple=12 ,_snake_case : Dict=12 ,_snake_case : Dict=3_072 ,_snake_case : Tuple="gelu" ,_snake_case : List[Any]=0.1 ,_snake_case : List[Any]=0.1 ,_snake_case : Optional[Any]=1_536 ,_snake_case : Dict=2 ,_snake_case : Union[str, Any]=0.02 ,_snake_case : Optional[Any]=1e-12 ,_snake_case : Optional[Any]=0 ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,**_snake_case : Optional[int] ,) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,**_snake_case )
lowercase__ : Optional[int] = vocab_size
lowercase__ : int = hidden_size if embedding_size is None else embedding_size
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : str = hidden_act
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Optional[int] = initializer_range
lowercase__ : List[Any] = layer_norm_eps
lowercase__ : Optional[Any] = rotary_value
lowercase__ : Optional[int] = use_cache
class __A ( A_ ):
'''simple docstring'''
@property
def UpperCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ : List[Any] = {0: '''batch''', 1: '''sequence'''}
lowercase__ : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 560
| 1
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A: Optional[Any] = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class __magic_name__ ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase=None , _lowercase=1 ) -> List[Any]:
lowercase_ : List[Any] = tokenizer
lowercase_ : int = dataset
lowercase_ : str = len(__SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
lowercase_ : int = n_copies
def __iter__( self ) -> str:
lowercase_ : List[Any] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowercase_ : str = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __magic_name__ ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Tuple:
lowercase_ : List[str] = start_length
lowercase_ : List[Any] = eof_strings
lowercase_ : str = tokenizer
def __call__( self , _lowercase , _lowercase , **_lowercase ) -> str:
lowercase_ : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowercase_ : List[str] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Optional[Any] = re.split('(%s)' % '|'.join(_UpperCAmelCase ) , _UpperCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def _UpperCAmelCase ( a : List[str] , a : Optional[int] , a : int , a : Union[str, Any] , a : Tuple , a : List[Any]=2_0 , **a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = defaultdict(_UpperCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_UpperCAmelCase ) ):
with torch.no_grad():
lowercase_ : int = batch['ids'].shape[-1]
lowercase_ : Tuple = accelerator.unwrap_model(_UpperCAmelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_UpperCAmelCase , **_UpperCAmelCase )
# each task is generated batch_size times
lowercase_ : Union[str, Any] = batch['task_id'].repeat(_UpperCAmelCase )
lowercase_ : Any = accelerator.pad_across_processes(
_UpperCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowercase_ , lowercase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
lowercase_ : Any = generated_tokens.cpu().numpy()
lowercase_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_UpperCAmelCase , _UpperCAmelCase ):
gen_token_dict[task].append(_UpperCAmelCase )
lowercase_ : Optional[Any] = [[] for _ in range(_UpperCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowercase_ : Dict = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
code_gens[task].append(remove_last_block(_UpperCAmelCase ) )
return code_gens
def _UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
# Setup configuration
lowercase_ : Tuple = HfArgumentParser(_UpperCAmelCase )
lowercase_ : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowercase_ : Tuple = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowercase_ : Tuple = 'false'
if args.num_workers is None:
lowercase_ : Optional[int] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowercase_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=_UpperCAmelCase )
# Load model and tokenizer
lowercase_ : str = AutoTokenizer.from_pretrained(args.model_ckpt )
lowercase_ : List[Any] = tokenizer.eos_token
lowercase_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowercase_ : int = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _UpperCAmelCase , _UpperCAmelCase )] ),
}
# Load evaluation dataset and metric
lowercase_ : Tuple = load_dataset('openai_humaneval' )
lowercase_ : str = load_metric('code_eval' )
lowercase_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowercase_ : Union[str, Any] = args.n_samples // args.batch_size
lowercase_ : List[Any] = TokenizedDataset(_UpperCAmelCase , human_eval['test'] , n_copies=_UpperCAmelCase , n_tasks=_UpperCAmelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowercase_ : str = DataLoader(_UpperCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowercase_ : Union[str, Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowercase_ , lowercase_ : str = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase )
lowercase_ : List[Any] = complete_code(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , n_tasks=_UpperCAmelCase , batch_size=args.batch_size , **_UpperCAmelCase , )
if accelerator.is_main_process:
lowercase_ : Dict = []
for task in tqdm(range(_UpperCAmelCase ) ):
lowercase_ : int = human_eval['test'][task]['test']
lowercase_ : Optional[Any] = f"check({human_eval['test'][task]['entry_point']})"
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowercase_ , lowercase_ : Dict = code_eval_metric.compute(
references=_UpperCAmelCase , predictions=_UpperCAmelCase , num_workers=args.num_workers )
print(f"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 715
|
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[str] = git.Repo(search_parent_directories=a )
lowercase_ : Union[str, Any] = {
'repo_id': str(a ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f:
json.dump(a , a , indent=4 )
def _UpperCAmelCase ( a : str ) -> Union[str, Any]:
"""simple docstring"""
if params.n_gpu <= 0:
lowercase_ : int = 0
lowercase_ : Union[str, Any] = -1
lowercase_ : List[str] = True
lowercase_ : Optional[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase_ : Dict = int(os.environ['WORLD_SIZE'] )
lowercase_ : Union[str, Any] = int(os.environ['N_GPU_NODE'] )
lowercase_ : Optional[int] = int(os.environ['RANK'] )
# number of nodes / node ID
lowercase_ : int = params.world_size // params.n_gpu_per_node
lowercase_ : str = params.global_rank // params.n_gpu_per_node
lowercase_ : Dict = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase_ : str = 1
lowercase_ : Dict = 0
lowercase_ : Tuple = 0
lowercase_ : List[Any] = 0
lowercase_ : int = 1
lowercase_ : Tuple = 1
lowercase_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase_ : List[str] = params.node_id == 0 and params.local_rank == 0
lowercase_ : Optional[Any] = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( a : Dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7
| 0
|
def solution() -> int:
    '''simple docstring'''
    # Project Euler 40: multiply selected digits of the Champernowne constant 0.123456789101112...
    constant = []
    i = 1
    while len(constant) < 1E6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[9_99])
        * int(constant[99_99])
        * int(constant[9_99_99])
        * int(constant[99_99_99])
    )
if __name__ == "__main__":
print(solution())
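# Illustrative check of the indexing above (digits of 0.123456789101112...):
# constant[0] == '1', constant[9] == '1' (the first digit of 10) and constant[99] == '5',
# so the product multiplies the 1st, 10th, 100th, ..., 1000000th digits.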
| 12
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase ( __a ):
def A_ (self ) -> Any:
UpperCamelCase_ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , """num_attention_heads""" ) )
class UpperCamelCase :
def __init__(self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=64 , __UpperCamelCase=3 , __UpperCamelCase=3 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=16 , __UpperCamelCase=[128, 256, 384] , __UpperCamelCase=[4, 6, 8] , __UpperCamelCase=[2, 3, 4] , __UpperCamelCase=[16, 16, 16] , __UpperCamelCase=0 , __UpperCamelCase=[2, 2, 2] , __UpperCamelCase=[2, 2, 2] , __UpperCamelCase=0.02 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=2 , ) -> Optional[int]:
UpperCamelCase_ : Tuple = parent
UpperCamelCase_ : Optional[Any] = batch_size
UpperCamelCase_ : Dict = image_size
UpperCamelCase_ : Dict = num_channels
UpperCamelCase_ : Optional[Any] = kernel_size
UpperCamelCase_ : int = stride
UpperCamelCase_ : str = padding
UpperCamelCase_ : Tuple = hidden_sizes
UpperCamelCase_ : int = num_attention_heads
UpperCamelCase_ : List[str] = depths
UpperCamelCase_ : Dict = key_dim
UpperCamelCase_ : Any = drop_path_rate
UpperCamelCase_ : List[Any] = patch_size
UpperCamelCase_ : Any = attention_ratio
UpperCamelCase_ : Optional[Any] = mlp_ratio
UpperCamelCase_ : Optional[int] = initializer_range
UpperCamelCase_ : Optional[Any] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase_ : Tuple = is_training
UpperCamelCase_ : Any = use_labels
UpperCamelCase_ : Dict = num_labels
UpperCamelCase_ : List[str] = initializer_range
def A_ (self ) -> Dict:
UpperCamelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : List[str] = None
if self.use_labels:
UpperCamelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A_ (self ) -> Optional[int]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
UpperCamelCase_ : int = LevitModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ : List[Any] = model(__UpperCamelCase )
UpperCamelCase_ : int = (self.image_size, self.image_size)
UpperCamelCase_,UpperCamelCase_ : Optional[int] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ : Union[str, Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase_ : List[Any] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
UpperCamelCase_ : List[str] = self.num_labels
UpperCamelCase_ : Any = LevitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ : int = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ (self ) -> str:
UpperCamelCase_ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase_,UpperCamelCase_,UpperCamelCase_ : Any = config_and_inputs
UpperCamelCase_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __a , __a , unittest.TestCase ):
a__ :Any = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
a__ :str = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ :Optional[Any] = False
a__ :Optional[int] = False
a__ :Tuple = False
a__ :List[str] = False
a__ :Dict = False
def A_ (self ) -> List[Any]:
UpperCamelCase_ : int = LevitModelTester(self )
UpperCamelCase_ : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def A_ (self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ (self ) -> Optional[Any]:
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def A_ (self ) -> int:
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def A_ (self ) -> Any:
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def A_ (self ) -> List[str]:
pass
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_,UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : str = model_class(__UpperCamelCase )
UpperCamelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Dict = [*signature.parameters.keys()]
UpperCamelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def A_ (self ) -> Optional[Any]:
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : List[str] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : Union[str, Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCamelCase_ : Optional[Any] = outputs.hidden_states
UpperCamelCase_ : Optional[int] = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
UpperCamelCase_ : Tuple = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase_,UpperCamelCase_ : Optional[int] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ : Dict = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase_ : List[str] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase_,UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Union[str, Any] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ : List[str] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A_ (self ) -> Dict:
pass
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
UpperCamelCase_ : List[str] = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A_ (self ) -> Tuple:
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def A_ (self ) -> List[Any]:
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def A_ (self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase_,UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ : List[Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase_ : int = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
UpperCamelCase_ : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
UpperCamelCase_ : int = model(**__UpperCamelCase ).loss
loss.backward()
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_,UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : str = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase_ : str = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
UpperCamelCase_ : Optional[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_,UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ : Any = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
UpperCamelCase_ : Any = problem_type["""title"""]
UpperCamelCase_ : Dict = problem_type["""num_labels"""]
UpperCamelCase_ : Dict = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
UpperCamelCase_ : Optional[int] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if problem_type["num_labels"] > 1:
UpperCamelCase_ : Union[str, Any] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
UpperCamelCase_ : Tuple = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list:
UpperCamelCase_ : str = model(**__UpperCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def A_ (self ) -> Dict:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : Any = LevitModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowerCAmelCase_ ( ):
UpperCamelCase_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def A_ (self ) -> Any:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A_ (self ) -> str:
UpperCamelCase_ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = self.default_image_processor
UpperCamelCase_ : List[str] = prepare_img()
UpperCamelCase_ : Optional[int] = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ : Union[str, Any] = model(**__UpperCamelCase )
# verify the logits
UpperCamelCase_ : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCamelCase_ : Any = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
| 635
| 0
|
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number) + 1 ) )
def solution(max_prime: int = 10**6) -> int:
    # Count primes below `max_prime` that are a difference of two consecutive cubes.
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 702
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case = logging.get_logger(__name__)
snake_case = {"""vocab_file""": """spiece.model"""}
snake_case = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
snake_case = {
"""AI-Sweden/gpt-sw3-126m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-350m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-1.6b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-6.7b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-20b""": 2_0_4_8,
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Any ,__A : Tuple ,__A : Any=False ,__A : int=False ,__A : List[str]=False ,__A : List[str]=None ,__A : Dict=None ,__A : Dict=None ,__A : Union[str, Any]=None ,__A : Optional[Dict[str, Any]] = None ,**__A : Tuple ,) -> None:
_lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowercase = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored' )
_lowercase = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowercase = '<|endoftext|>' if eos_token is None else eos_token
_lowercase = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowercase = unk_token if pad_token is None else pad_token
_lowercase = eos_token if bos_token is None else bos_token
else:
_lowercase = '<pad>' if pad_token is None else pad_token
_lowercase = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=__A ,remove_space=__A ,keep_accents=__A ,bos_token=__A ,eos_token=__A ,unk_token=__A ,pad_token=__A ,sp_model_kwargs=self.sp_model_kwargs ,**__A ,)
_lowercase = do_lower_case
_lowercase = remove_space
_lowercase = keep_accents
_lowercase = vocab_file
_lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowercase = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowercase = re.compile(
F"""[{"".join(map(__A ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self : List[Any] ) -> List[str]:
_lowercase = self.__dict__.copy()
_lowercase = None
return state
def __setstate__( self : Optional[Any] ,__A : Dict ) -> str:
_lowercase = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowercase = {}
_lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __UpperCAmelCase ( self : List[Any] ) -> int:
return len(self.sp_model )
def __UpperCAmelCase ( self : Optional[Any] ,__A : str ) -> str:
_lowercase = self.non_printing_characters_re.sub('' ,__A )
# Normalize whitespaces
_lowercase = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
_lowercase = unicodedata.normalize('NFC' ,__A )
return text
def __UpperCAmelCase ( self : Optional[Any] ,__A : str ,**__A : Optional[int] ) -> List[str]:
_lowercase = self.preprocess_text(__A )
return self.sp_model.encode(__A ,out_type=__A )
def __UpperCAmelCase ( self : List[Any] ,__A : str ) -> int:
return self.sp_model.PieceToId(__A )
def __UpperCAmelCase ( self : Any ,__A : int ) -> str:
return self.sp_model.IdToPiece(__A )
@staticmethod
def __UpperCAmelCase ( __A : str ) -> str:
return out_string
def __UpperCAmelCase ( self : Tuple ,__A : List[str] ) -> str:
_lowercase = []
_lowercase = ''
_lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowercase = True
_lowercase = []
else:
current_sub_tokens.append(__A )
_lowercase = False
out_string += self.sp_model.decode(__A )
return out_string
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict[str, int]:
_lowercase = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self : Optional[int] ,__A : str ,__A : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowercase = os.path.join(
__A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A ,'wb' ) as fi:
_lowercase = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def __UpperCAmelCase ( self : str ,__A : Union[str, List[str]] ,__A : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__A ,__A ):
_lowercase = self.preprocess_text(__A )
_lowercase = self.sp_model.encode(__A )
else:
_lowercase = [self.preprocess_text(__A ) for t in text]
_lowercase = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowercase = torch.tensor(__A )
return token_ids
def __UpperCAmelCase ( self : Optional[Any] ,__A : Union[int, List[int]] ) -> str:
return self.sp_model.decode(__A )
def __UpperCAmelCase ( self : str ,__A : "Conversation" ) -> List[int]:
_lowercase = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
_lowercase = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__A ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=__A )
| 535
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__lowerCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def __UpperCAmelCase ( self : Any, UpperCamelCase__ : str, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Any ) -> List[str]:
_A = TextaTextGenerationPipeline(model=UpperCamelCase__, tokenizer=UpperCamelCase__ )
return generator, ["Something to write", "Something else"]
def __UpperCAmelCase ( self : Any, UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any] ) -> List[str]:
_A = generator('Something there' )
self.assertEqual(UpperCamelCase__, [{'generated_text': ANY(UpperCamelCase__ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_A = generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__, [
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
], )
_A = generator(
['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__, [
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
], )
with self.assertRaises(UpperCamelCase__ ):
generator(4 )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
_A = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='pt' )
# do_sample=False necessary for reproducibility
_A = generator('Something there', do_sample=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__, [{'generated_text': ''}] )
_A = 3
_A = generator(
'Something there', num_return_sequences=UpperCamelCase__, num_beams=UpperCamelCase__, )
_A = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(UpperCamelCase__, UpperCamelCase__ )
_A = generator('This is a test', do_sample=UpperCamelCase__, num_return_sequences=2, return_tensors=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__, [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
], )
_A = generator.model.config.eos_token_id
_A = '<pad>'
_A = generator(
['This is a test', 'This is a second test'], do_sample=UpperCamelCase__, num_return_sequences=2, batch_size=2, return_tensors=UpperCamelCase__, )
self.assertEqual(
UpperCamelCase__, [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
], )
@require_tf
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_A = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='tf' )
# do_sample=False necessary for reproducibility
_A = generator('Something there', do_sample=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__, [{'generated_text': ''}] )
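# Minimal stand-alone sketch of the pipeline API exercised by these tests. The tiny
# checkpoint mirrors the one used above; the generated text itself is model-dependent,
# so only the structure of the result is checked.
if __name__ == "__main__":
    from transformers import pipeline
    generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='pt' )
    outputs = generator('Something there', do_sample=False )
    assert isinstance(outputs, list ) and 'generated_text' in outputs[0]
    print(outputs )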
| 107
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _A( lowerCAmelCase , lowerCAmelCase=False ):
A__ : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
A__ : List[str] = """"""
else:
A__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
A__ : Tuple = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ : Tuple = in_proj_weight[
: config.hidden_size, :
]
A__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
A__ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : int = in_proj_weight[
-config.hidden_size :, :
]
A__ : List[Any] = in_proj_bias[-config.hidden_size :]
def _A( lowerCAmelCase ):
A__ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
def _A( lowerCAmelCase ):
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
A__ : Dict = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
A__ : Optional[Any] = dct.pop(lowerCAmelCase )
A__ : List[Any] = val
def _A( lowerCAmelCase , lowerCAmelCase ):
A__ : str = ViTMSNConfig()
A__ : List[str] = 1000
A__ : Optional[int] = """datasets/huggingface/label-files"""
A__ : Optional[int] = """imagenet-1k-id2label.json"""
A__ : str = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase ) , """r""" ) )
A__ : Tuple = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
A__ : Optional[Any] = idalabel
A__ : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
A__ : Tuple = 384
A__ : int = 1536
A__ : Union[str, Any] = 6
elif "l16" in checkpoint_url:
A__ : int = 1024
A__ : int = 4096
A__ : Any = 24
A__ : int = 16
A__ : Union[str, Any] = 0.1
elif "b4" in checkpoint_url:
A__ : Dict = 4
elif "l7" in checkpoint_url:
A__ : List[str] = 7
A__ : Optional[int] = 1024
A__ : Optional[int] = 4096
A__ : Any = 24
A__ : Union[str, Any] = 16
A__ : Tuple = 0.1
A__ : List[str] = ViTMSNModel(lowerCAmelCase )
A__ : Any = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" )["""target_encoder"""]
A__ : List[str] = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCAmelCase )
A__ : List[str] = create_rename_keys(lowerCAmelCase , base_model=lowerCAmelCase )
for src, dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
read_in_q_k_v(lowerCAmelCase , lowerCAmelCase , base_model=lowerCAmelCase )
model.load_state_dict(lowerCAmelCase )
model.eval()
A__ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : int = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
    A__ : List[Any] = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
A__ : Any = image_processor(images=lowerCAmelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
A__ : Tuple = model(**lowerCAmelCase )
A__ : Union[str, Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
A__ : Union[str, Any] = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
A__ : int = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
A__ : List[Any] = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
A__ : Optional[Any] = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
A__ : str = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCAmelCase , atol=1E-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_UpperCamelCase = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
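# Follow-up sketch (assumption): once the script has been run with a value for
# --pytorch_dump_folder_path, the converted checkpoint can be reloaded with the standard
# from_pretrained API. The folder name and image path below are placeholders.
#
#     from PIL import Image
#     from transformers import ViTImageProcessor, ViTMSNModel
#
#     model = ViTMSNModel.from_pretrained("./vit-msn-checkpoint")
#     image_processor = ViTImageProcessor.from_pretrained("./vit-msn-checkpoint")
#     image = Image.open("example.jpg")  # any RGB image
#     inputs = image_processor(images=image, return_tensors="pt")
#     last_hidden_state = model(**inputs).last_hidden_state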
| 363
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class A ( TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : int = tempfile.mkdtemp()
A : Optional[int] = 8
# DPR tok
A : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A : Optional[int] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
A : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
A : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
A : Tuple = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
A : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
A : str = {'''unk_token''': '''<unk>'''}
A : Any = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
A : str = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
A : List[str] = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE ) )
def __lowerCAmelCase ( self ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __lowerCAmelCase ( self ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Any = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
A : List[str] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
A : Tuple = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(SCREAMING_SNAKE_CASE )
rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : int = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
A : Union[str, Any] = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
A : Dict = tokenizer(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : str = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
A : List[str] = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
A : Tuple = tokenizer(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
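# Condensed usage sketch of the tokenizer exercised by the slow tests above (requires
# network access to the "facebook/rag-token-nq" checkpoint; only tokenizer files are
# fetched, not the retriever index).
if __name__ == "__main__":
    from transformers import RagTokenizer
    tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
    batch = tokenizer(['who got the first nobel prize in physics'] )
    print(batch['input_ids'] )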
| 343
|
'''simple docstring'''
import math
import sys
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Dict = ''''''
try:
with open(snake_case__ , '''rb''' ) as binary_file:
A : Optional[Any] = binary_file.read()
for dat in data:
A : Union[str, Any] = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result, curr_string = '''''', ''''''
    index = len(lexicon )
    for i in range(len(snake_case__ ) ):
        curr_string += snake_case__[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '''0'''
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['''0''' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '''1'''
        index += 1
        curr_string = ''''''
    return result
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Tuple = 8
try:
with open(snake_case__ , '''wb''' ) as opened_file:
A : List[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case__ ) , snake_case__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(snake_case__ , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[int] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
A : Union[str, Any] = data_bits[counter:]
A : Tuple = data_bits[counter + 1 :]
return data_bits
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : int = read_file_binary(snake_case__ )
A : Dict = remove_prefix(snake_case__ )
A : Union[str, Any] = decompress_data(snake_case__ )
write_file_binary(snake_case__ , snake_case__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 343
| 1
|
"""simple docstring"""
from __future__ import annotations
def binary_search( a_list :list[int] , item :int ):
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , item )
    else:
        return binary_search(a_list[midpoint + 1 :] , item )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
    print(f"""{target} was {not_str}found in {sequence}""")
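# Quick sanity checks for the recursive search above; they run only when the file is
# executed directly, after the interactive prompt.
if __name__ == "__main__":
    assert binary_search([1, 3, 5, 7, 9], 7 ) is True
    assert binary_search([1, 3, 5, 7, 9], 4 ) is False
    assert binary_search([], 1 ) is False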
| 49
|
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49
| 1
|
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( taken : int = 20 ):
    total = math.comb(NUM_BALLS , taken )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , taken )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
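# Cross-check sketch: the closed form above is the expected number of distinct colours,
# NUM_COLOURS * (1 - C(60, 20) / C(70, 20)), i.e. seven times the probability that a
# fixed colour appears among the 20 drawn balls. Exact arithmetic gives the same value.
if __name__ == "__main__":
    from fractions import Fraction
    exact = NUM_COLOURS * (1 - Fraction(math.comb(NUM_BALLS - BALLS_PER_COLOUR, 20 ), math.comb(NUM_BALLS, 20 ) ))
    print(F'''{float(exact ):.9f}''' )  # should agree with solution(20)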
| 477
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE ={
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 477
| 1
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__: Dict = logging.get_logger(__name__)
a__: Any = {
"nielsr/canine-s": 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
a__: Dict = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
a__: Any = 0
a__: Dict = 0xE_0_0_0
a__: Optional[Any] = 0xE_0_0_1
a__: Optional[Any] = 0xE_0_0_2
a__: List[str] = 0xE_0_0_3
a__: Any = 0xE_0_0_4
# Maps special codepoints to human-readable names.
a__: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
a__: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self,__lowerCamelCase=chr(UpperCamelCase_ ),__lowerCamelCase=chr(UpperCamelCase_ ),__lowerCamelCase=chr(UpperCamelCase_ ),__lowerCamelCase=chr(UpperCamelCase_ ),__lowerCamelCase=chr(UpperCamelCase_ ),__lowerCamelCase=chr(UpperCamelCase_ ),__lowerCamelCase=False,__lowerCamelCase=2048,**__lowerCamelCase,):
A__ = AddedToken(UpperCamelCase_,lstrip=UpperCamelCase_,rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_,UpperCamelCase_ ) else bos_token
A__ = AddedToken(UpperCamelCase_,lstrip=UpperCamelCase_,rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_,UpperCamelCase_ ) else eos_token
A__ = AddedToken(UpperCamelCase_,lstrip=UpperCamelCase_,rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_,UpperCamelCase_ ) else sep_token
A__ = AddedToken(UpperCamelCase_,lstrip=UpperCamelCase_,rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_,UpperCamelCase_ ) else cls_token
A__ = AddedToken(UpperCamelCase_,lstrip=UpperCamelCase_,rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_,UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A__ = AddedToken(UpperCamelCase_,lstrip=UpperCamelCase_,rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_,UpperCamelCase_ ) else mask_token
super().__init__(
bos_token=UpperCamelCase_,eos_token=UpperCamelCase_,sep_token=UpperCamelCase_,cls_token=UpperCamelCase_,pad_token=UpperCamelCase_,mask_token=UpperCamelCase_,add_prefix_space=UpperCamelCase_,model_max_length=UpperCamelCase_,**UpperCamelCase_,)
# Creates a mapping for looking up the IDs of special symbols.
A__ = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
A__ = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
A__ = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
A__ = UNICODE_VOCAB_SIZE
A__ = len(self._special_codepoints )
@property
def UpperCamelCase ( self ):
return self._unicode_vocab_size
def UpperCamelCase ( self,__lowerCamelCase ):
return list(UpperCamelCase_ )
def UpperCamelCase ( self,__lowerCamelCase ):
try:
return ord(UpperCamelCase_ )
except TypeError:
raise ValueError(f"invalid token: '{token}'" )
def UpperCamelCase ( self,__lowerCamelCase ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase_ )
except TypeError:
raise ValueError(f"invalid id: {index}" )
def UpperCamelCase ( self,__lowerCamelCase ):
return "".join(UpperCamelCase_ )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
A__ = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_,token_ids_a=UpperCamelCase_,already_has_special_tokens=UpperCamelCase_ )
A__ = [1] + ([0] * len(UpperCamelCase_ )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase_ )) + [1]
return result
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
A__ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
return ()
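# Usage sketch (assumption: this module backs `CanineTokenizer` in transformers and the
# "google/canine-s" checkpoint is available). Characters map directly to their Unicode
# codepoints, bracketed by the private-use [CLS]/[SEP] codepoints defined above.
#
#     from transformers import CanineTokenizer
#
#     tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
#     print(tokenizer("hi").input_ids)  # e.g. [57344, ord("h"), ord("i"), 57345]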
| 190
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
SCREAMING_SNAKE_CASE : Union[str, Any] = 50_003
SCREAMING_SNAKE_CASE : Any = 50_002
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase : Dict =PLBartTokenizer
lowercase : int =None
lowercase : Optional[int] =False
def UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ :Optional[Any] = PLBartTokenizer(UpperCamelCase_ , language_codes='''base''' , keep_accents=UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
lowercase_ :int = PLBartTokenizer(UpperCamelCase_ , language_codes='''base''' , keep_accents=UpperCamelCase_ )
lowercase_ :Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase_ :Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ :List[str] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase_ :List[str] = tokenizer.vocab_size
lowercase_ :Union[str, Any] = [tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) for x in range(end - 4 , UpperCamelCase_ )]
self.assertListEqual(UpperCamelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
lowercase_ :Dict = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase_ :Union[str, Any] = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) , UpperCamelCase_ , )
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = PLBartTokenizer(UpperCamelCase_ , language_codes='''multi''' , keep_accents=UpperCamelCase_ )
lowercase_ :str = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ :Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase_ :Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ :int = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase_ :Union[str, Any] = tokenizer.vocab_size
lowercase_ :Optional[Any] = [tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) for x in range(end - 7 , UpperCamelCase_ )]
self.assertListEqual(
UpperCamelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
lowercase_ :List[Any] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase_ :Any = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) , UpperCamelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] ="""uclanlp/plbart-python-en_XX"""
lowercase : Union[str, Any] =[
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
lowercase : Union[str, Any] =[
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
lowercase : int =[
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def UpperCamelCase ( cls ):
lowercase_ :PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
lowercase_ :Any = 1
return cls
def UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_0003 )
def UpperCamelCase ( self ):
lowercase_ :List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
def UpperCamelCase ( self ):
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
lowercase_ :Union[str, Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
lowercase_ :List[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
lowercase_ :Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :List[Any] = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , UpperCamelCase_ )
lowercase_ :Any = 10
lowercase_ :Union[str, Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_0004, 5_0001] )
def UpperCamelCase ( self ):
lowercase_ :int = tempfile.mkdtemp()
lowercase_ :str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_ )
lowercase_ :Tuple = PLBartTokenizer.from_pretrained(UpperCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors='''pt''' )
lowercase_ :List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , UpperCamelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowercase_ :List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowercase_ :Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors='''pt''' )
lowercase_ :int = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors='''pt''' )
lowercase_ :str = targets['''input_ids''']
lowercase_ :List[Any] = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :int = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 5_0003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_0001,
} , )
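# Stand-alone sketch of the translation-style encoding exercised above (requires network
# access to the "uclanlp/plbart-python-en_XX" checkpoint). src_lang/tgt_lang select the
# language-code suffix tokens that the assertions above check for.
if __name__ == "__main__":
    from transformers import PLBartTokenizer
    tokenizer = PLBartTokenizer.from_pretrained('uclanlp/plbart-python-en_XX', src_lang='python', tgt_lang='en_XX' )
    encoded = tokenizer('def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])' )
    print(encoded['input_ids'][-2:] )  # ends with [eos, __python__]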
| 257
| 0
|
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def __lowerCAmelCase ( lowercase : List[Any] , lowercase : int ) -> List[Any]:
"""simple docstring"""
snake_case : Dict = int(lowercase )
assert noofclusters < len(lowercase )
# Find out the dimensionality
snake_case : Dict = len(vectors[0] )
# Will help select random centroids from among the available vectors
snake_case : List[str] = list(range(len(lowercase ) ) )
shuffle(lowercase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
snake_case : str = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
snake_case : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
snake_case : Tuple = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase )
]
##These nodes will assign the centroid Variables the appropriate
##values
snake_case : Dict = tf.placeholder("float64" , [dim] )
snake_case : Union[str, Any] = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase , lowercase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
snake_case : int = [tf.Variable(0 ) for i in range(len(lowercase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
snake_case : List[str] = tf.placeholder("int32" )
snake_case : Tuple = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase , lowercase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
snake_case : int = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
snake_case : Optional[Any] = tf.reduce_mean(lowercase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
snake_case : List[str] = tf.placeholder("float" , [dim] )
snake_case : Any = tf.placeholder("float" , [dim] )
snake_case : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
snake_case : Tuple = tf.placeholder("float" , [noofclusters] )
snake_case : Optional[int] = tf.argmin(lowercase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
snake_case : List[str] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
snake_case : str = 100
for _ in range(lowercase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase ) ):
snake_case : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
snake_case : Union[str, Any] = [
sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
snake_case : int = sess.run(
lowercase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase ):
# Collect all the vectors assigned to this cluster
snake_case : Optional[Any] = [
vectors[i]
for i in range(len(lowercase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
snake_case : Optional[Any] = sess.run(
lowercase , feed_dict={mean_input: array(lowercase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
snake_case : List[str] = sess.run(lowercase )
snake_case : Union[str, Any] = sess.run(lowercase )
return centroids, assignments
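# The same Expectation-Maximization loop as above, written as a compact NumPy sketch.
# This is an illustration only and is not called by the TensorFlow implementation.
def _numpy_kmeans_sketch(vectors, noofclusters, iterations=100):
    import numpy as np
    data = np.asarray(vectors, dtype=float )
    rng = np.random.default_rng(0 )
    centroids = data[rng.choice(len(data ), size=noofclusters, replace=False )]
    assignments = np.zeros(len(data ), dtype=int )
    for _ in range(iterations ):
        # Expectation: assign every vector to its nearest centroid.
        distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=-1 )
        assignments = distances.argmin(axis=1 )
        # Maximization: move each centroid to the mean of its assigned vectors.
        for k in range(noofclusters ):
            members = data[assignments == k]
            if len(members ) > 0:
                centroids[k] = members.mean(axis=0 )
    return centroids, assignments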
| 117
|
"""simple docstring"""
import base64
def base85_encode ( string : str ) -> bytes:
    """simple docstring"""
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode ( a85encoded : bytes ) -> str:
    """simple docstring"""
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
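# Round-trip sketch for the helpers above; runs only when the file is executed directly.
if __name__ == "__main__":
    encoded = base85_encode('Hello World!' )
    print(encoded )
    assert base85_decode(encoded ) == 'Hello World!'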
| 117
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _UpperCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ShapEPipeline
__SCREAMING_SNAKE_CASE : Any = ["prompt"]
__SCREAMING_SNAKE_CASE : Dict = ["prompt"]
__SCREAMING_SNAKE_CASE : Optional[int] = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__SCREAMING_SNAKE_CASE : int = False
@property
def a_ ( self ) -> List[Any]:
return 3_2
@property
def a_ ( self ) -> int:
return 3_2
@property
def a_ ( self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def a_ ( self ) -> str:
return 8
@property
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a_ ( self ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(UpperCAmelCase__ )
@property
def a_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase = {
'num_attention_heads': 2,
'attention_head_dim': 1_6,
'embedding_dim': self.time_input_dim,
'num_embeddings': 3_2,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
UpperCAmelCase = PriorTransformer(**UpperCAmelCase__ )
return model
@property
def a_ ( self ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase = {
'param_shapes': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 1_2,
'background': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase = ShapERenderer(**UpperCAmelCase__ )
return model
def a_ ( self ) -> List[Any]:
UpperCAmelCase = self.dummy_prior
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = self.dummy_tokenizer
UpperCAmelCase = self.dummy_renderer
UpperCAmelCase = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=UpperCAmelCase__ , clip_sample=UpperCAmelCase__ , clip_sample_range=1.0 , )
UpperCAmelCase = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a_ ( self , lowercase_ , lowercase_=0 ) -> Union[str, Any]:
if str(UpperCAmelCase__ ).startswith('mps' ):
UpperCAmelCase = torch.manual_seed(UpperCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
UpperCAmelCase = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 3_2,
'output_type': 'np',
}
return inputs
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = 'cpu'
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**UpperCAmelCase__ )
UpperCAmelCase = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
UpperCAmelCase = pipe(**self.get_dummy_inputs(UpperCAmelCase__ ) )
UpperCAmelCase = output.images[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
UpperCAmelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ) -> int:
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a_ ( self ) -> List[str]:
UpperCAmelCase = torch_device == 'cpu'
UpperCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCAmelCase__ , relax_max_difference=UpperCAmelCase__ , )
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**UpperCAmelCase__ )
UpperCAmelCase = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
UpperCAmelCase = 1
UpperCAmelCase = 2
UpperCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase = batch_size * [inputs[key]]
UpperCAmelCase = pipe(**UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a_ ( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
UpperCAmelCase = ShapEPipeline.from_pretrained('openai/shap-e' )
UpperCAmelCase = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
UpperCAmelCase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
UpperCAmelCase = pipe(
'a shark' , generator=UpperCAmelCase__ , guidance_scale=1_5.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(UpperCAmelCase__ , UpperCAmelCase__ )
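# Condensed usage sketch of the pipeline exercised by the slow test above (assumes the
# "openai/shap-e" weights are available; a GPU is needed for this to be practical).
#
#     import torch
#     from diffusers import ShapEPipeline
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#     generator = torch.Generator(device="cuda").manual_seed(0)
#     images = pipe(
#         "a shark", generator=generator, guidance_scale=15.0,
#         num_inference_steps=64, frame_size=64, output_type="np",
#     ).images[0]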
| 373
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
_lowerCamelCase : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
"""simple docstring"""
A__ = set()
A__ = []
def parse_line(lowercase_ ):
for line in fp:
if isinstance(lowercase_ , lowercase_ ):
A__ = line.decode('''UTF-8''' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(''' ''' ):
# process a single warning and move it to `selected_warnings`.
if len(lowercase_ ) > 0:
A__ = '''\n'''.join(lowercase_ )
# Only keep the warnings specified in `targets`
if any(f""": {x}: """ in warning for x in targets ):
selected_warnings.add(lowercase_ )
buffer.clear()
continue
else:
A__ = line.strip()
buffer.append(lowercase_ )
if from_gh:
for filename in os.listdir(lowercase_ ):
A__ = os.path.join(lowercase_ , lowercase_ )
if not os.path.isdir(lowercase_ ):
# read the file
if filename != "warnings.txt":
continue
with open(lowercase_ ) as fp:
parse_line(lowercase_ )
else:
try:
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowercase_ ) as fp:
parse_line(lowercase_ )
except Exception:
logger.warning(
f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = set()
A__ = [os.path.join(lowercase_ , lowercase_ ) for p in os.listdir(lowercase_ ) if (p.endswith('''.zip''' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowercase_ , lowercase_ ) )
return selected_warnings
if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()
    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
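# Example invocation (a hedged sketch: the run id and token are placeholders, and the file name
# `extract_warnings.py` is assumed for this script):
#
#   python extract_warnings.py \
#       --workflow_run_id 123456789 \
#       --output_dir ./ci_warnings \
#       --token "$GITHUB_TOKEN" \
#       --targets DeprecationWarning,FutureWarning
#
# The script writes `artifacts.json` (download links) and `selected_warnings.json` (the extracted,
# sorted warnings) into --output_dir.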
| 87
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for an OpenAI GPT model (vocabulary/layer sizes, dropouts, summary-head options)."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
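# A minimal usage sketch (assumes the standard `transformers` public API that this config belongs to):
#
#   from transformers import OpenAIGPTConfig, OpenAIGPTModel
#
#   config = OpenAIGPTConfig(n_layer=6, n_head=8)  # override a couple of defaults
#   model = OpenAIGPTModel(config)
#   # `attribute_map` aliases generic names onto the GPT-specific ones:
#   assert config.hidden_size == config.n_embd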
| 700
|
import copy
import re


class TrialShortNamer:
    """Builds short, reversible names for hyper-parameter trials (e.g. `hp_lr0.01_bs16`)."""

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    # try the next suffix on a collision
                    i += 1
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
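# A minimal usage sketch (hypothetical subclass and parameter names, for illustration only):
#
#   class MyNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32, "warmup": False}
#
#   name = MyNamer.shortname({"learning_rate": 1e-2, "batch_size": 32, "warmup": True})
#   # -> "run_lr0.01_w1": only values that differ from DEFAULTS appear in the name
#   params = MyNamer.parse_repr(name)
#   # -> a full parameter dict again (numeric values come back as floats, defaults are filled in)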
| 326
| 0
|