'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
            json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
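
# --- Illustrative sketch (added; not part of the original script) ---
# A minimal sketch of the `save_state`/`load_state` round-trip the script above
# exercises; the "epoch_0" directory name mirrors the naming scheme used there.
def _demo_checkpoint_roundtrip(accelerator, output_dir="."):
    # Save everything registered with `accelerator.prepare` (model, optimizer,
    # scheduler, RNG states) into a checkpoint folder ...
    checkpoint_dir = os.path.join(output_dir, "epoch_0")
    accelerator.save_state(checkpoint_dir)
    # ... and restore it later, e.g. when resuming a partially trained run.
    accelerator.load_state(checkpoint_dir)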
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
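
# --- Illustrative sketch (added; not part of the original script) ---
# A minimal sketch of running a converted checkpoint for CTC inference;
# "path/to/dump" stands in for --pytorch_dump_folder_path.
def _demo_converted_model_inference(dump_folder="path/to/dump"):
    import numpy as np

    processor = Wav2Vec2Processor.from_pretrained(dump_folder)
    model = Wav2Vec2ForCTC.from_pretrained(dump_folder)
    speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_ids = logits.argmax(dim=-1)
    return processor.batch_decode(predicted_ids)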
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
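
# --- Illustrative note (added; not part of the original module) ---
# With the `_LazyModule` pattern above, submodules are only imported on first
# attribute access, so `import transformers` stays cheap. A sketch of the
# consumer side, assuming transformers is installed with vision support:
#
#     from transformers import DeiTConfig
#     config = DeiTConfig()    # resolved through the lazy import structure
#     print(config.model_type)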
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """
    Decrypts a Caesar-shifted message by computing a chi-squared statistic
    against English letter frequencies for every possible shift and returning
    the best (lowest-scoring) candidate.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
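
# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch: "crybd cdbsxq" is a Caesar-shifted message that decodes
# to "short string" at shift 10, which the chi-squared search recovers.
def _demo_chi_squared_decrypt():
    shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")
    print(f"shift={shift}, chi_squared={chi_squared_value:.2f}, decoded={decoded!r}")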
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
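
# --- Illustrative usage (added; not part of the original module) ---
# This module backs the `accelerate test` CLI subcommand; from a shell it is
# typically invoked as
#
#     accelerate test --config_file path/to/default_config.yaml
#
# which launches test_script.py through `accelerate-launch`, as shown in
# test_command() above.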
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculation of the Gregorian Easter date for a given year, using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
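
# --- Illustrative check (added; not part of the original module) ---
# A minimal sanity check against a well-known date: Easter Sunday in 2000
# fell on April 23, which matches the computation above.
def _demo_gauss_easter_check():
    assert gauss_easter(2000) == datetime(2000, 4, 23)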
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor"
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
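
# --- Illustrative usage (added; not part of the original module) ---
# The function applies the mass-action law n * p = n_i^2: pass the unknown
# quantity as 0 and it returns the name and value of that quantity.
def _demo_carrier_concentration():
    # n = n_i^2 / p = 25 / 5 = 5.0
    assert carrier_concentration(electron_conc=0, hole_conc=5, intrinsic_conc=5) == ("electron_conc", 5.0)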
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
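
# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch: the config is a plain container of hyperparameters, so
# overrides are passed as keyword arguments at construction time.
def _demo_camembert_config():
    config = CamembertConfig(num_hidden_layers=6)
    assert config.num_hidden_layers == 6
    assert config.model_type == "camembert"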
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
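
# --- Illustrative check (added; not part of the original module) ---
# The series is returned as strings, with the first term rendered as "1".
def _demo_harmonic_series():
    assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]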
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
if __name__ == "__main__":
print(F"{solution() = }")
| 713
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions"""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main()
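
# Added usage sketch (illustrative only; the dataset name and hyperparameters
# below are assumptions, not taken from this script):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-finetuned \
#       --remove_unused_columns False \
#       --do_train --do_eval \
#       --per_device_train_batch_size 8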
| 299
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
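
    # Added usage sketch (illustrative; the script filename and checkpoint path
    # below are assumptions):
    #   python convert_yolos_to_pytorch.py \
    #       --yolos_name yolos_s_200_pre \
    #       --checkpoint_path ./yolos_s_200_pre.pth \
    #       --pytorch_dump_folder_path ./yolos-small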
| 8
|
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
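
    # Added demonstration: direct calls on fresh inputs. Average cost is
    # O(len(text) + len(pattern)) thanks to the rolling hash update above.
    assert rabin_karp("abra", "abracadabra")
    assert not rabin_karp("abrc", "abracadabra")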
| 2
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Decode head for UPerNet, combining the Pyramid Pooling Module (PSP) above
    with a Feature Pyramid Network (FPN) over the backbone feature maps.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """
    Auxiliary FCN head: a small stack of convolutions applied to one of the
    backbone feature maps (selected by `in_index`).
    """

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface
    for downloading and loading pretrained models.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`.

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
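

# Added usage sketch (not part of the original module): a plain inference pass
# with the checkpoint listed at the top of this file. Kept as a function that is
# never called, so importing this module stays side-effect free.
def _demo_upernet_inference():
    import numpy as np
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    image = (np.random.rand(512, 512, 3) * 255).astype("uint8")  # stand-in for a real image
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (batch, num_labels, height, width)
    print(logits.shape)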
| 28
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 28
| 1
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 115
|
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for a date, using the Doomsday algorithm."""
    # minimal input checks
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # a year is NOT a leap year if it isn't divisible by 4, or if it is a
    # century year (centurian == 0) not divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
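
    # Added check: 1 January 2000 was a Saturday; this also exercises the
    # corrected century leap-year branch above.
    assert get_week_day(2000, 1, 1) == "Saturday"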
| 601
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
SCREAMING_SNAKE_CASE_ = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        r"""
        Instantiate a [`RagConfig`] from a pre-trained question encoder model configuration and a generator model
        configuration.
        """
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
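

# Added usage sketch (a minimal illustration, not part of the original module):
# composing a RagConfig from two sub-configs with the classmethod above. The
# DPR/BART pairing mirrors the stock RAG models but is only an example here.
# Kept as a function that is never called, so importing stays side-effect free.
def _demo_rag_config():
    from transformers import BartConfig, DPRConfig

    config = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=5)
    print(config.n_docs, config.question_encoder.model_type, config.generator.model_type)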
| 711
|
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers of the same length and returns the
    Manhattan (taxicab) distance between them.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """
    Raises a TypeError or ValueError if `point` is not a non-empty list of numbers.
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Version of manhattan_distance written as a single generator expression over zip().
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
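
    # Added demonstration: |1 - 2| + |1 - 2| = 2 in two dimensions.
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0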
| 116
| 0
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """
    Base class for the output of a scheduler's `step` function.
    """

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """
    Base class for all schedulers: provides config loading/saving and
    compatible-scheduler lookup.
    """

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Returns all schedulers that are compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
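

# Added sketch (illustrative, not part of the original module): concrete
# schedulers subclass SchedulerMixin, so `scheduler.compatibles` lists the
# interchangeable classes. The checkpoint name below is an assumption.
#
#   from diffusers import EulerDiscreteScheduler
#   scheduler = EulerDiscreteScheduler.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
#   )
#   print([cls.__name__ for cls in scheduler.compatibles])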
| 396
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_UpperCamelCase : Any = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_UpperCamelCase : List[Any] = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_UpperCamelCase : List[str] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _lowercase( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ) ,id='references' ),
} ) ,)
    def _compute(self, references: List[List[List[str]]], predictions: List[List[str]], min_len: int = 1, max_len: int = 4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
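# Hedged usage note (added): `_compute` is a thin wrapper over NLTK, so the
# score can also be reproduced without `datasets`, e.g.:
#   from nltk.translate import gleu_score
#   gleu_score.corpus_gleu(list_of_references=[[ref_tokens]], hypotheses=[hyp_tokens])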
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__lowercase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__lowercase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__lowercase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
__lowercase = []
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
__lowercase = image_processing(lowerCamelCase__ , lowerCamelCase__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
__lowercase , __lowercase = prepare_semantic_single_inputs()
__lowercase = image_processing(lowerCamelCase__ , lowerCamelCase__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
__lowercase , __lowercase = prepare_semantic_batch_inputs()
__lowercase = image_processing(lowerCamelCase__ , lowerCamelCase__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
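        # Hedged note (added): with `do_reduce_labels=True`, BeitImageProcessor
        # maps the background class 0 to the ignore index 255 and shifts every
        # other label down by one, which is why the max jumps from 150 to 255.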
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = GPTSwaTokenizer(lowerCamelCase__ )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [465, 287, 265, 631, 842] )
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
lowerCamelCase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
__lowercase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
__lowercase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
# fmt: off
self.assertListEqual(
lowerCamelCase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = GPTSwaTokenizer(lowerCamelCase__ )
__lowercase = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
__lowercase = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertListEqual(tokenizer.encode_fast(lowerCamelCase__ ) , lowerCamelCase__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(tokenizer.decode_fast(lowerCamelCase__ ) , lowerCamelCase__ )
@slow
def UpperCAmelCase_ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
__lowercase = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=lowerCamelCase__ , )
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APPROX-VERTEX-COVER: repeatedly pick the highest-degree vertex."""
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
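        # Hedged note (added): "gelu_10" is gelu clipped at 10, so the two
        # activations must agree exactly wherever the clipped output stays
        # below the cap; the mask restricts the comparison to that region.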
    def test_get_activation(self):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_UpperCAmelCase = 4_5
_UpperCAmelCase = 1_5_8_1
_UpperCAmelCase = 1_5_1_7
_UpperCAmelCase = 1_5_7_0
_UpperCAmelCase = 1_5_8_4
_UpperCAmelCase = 1_7_9_3
_UpperCAmelCase = 1_7_9_5
_UpperCAmelCase = 1_9_1_6
_UpperCAmelCase = 1_8_6_4
_UpperCAmelCase = 1_9_0_5
_UpperCAmelCase = 1_9_1_9
_UpperCAmelCase = 2_4_2_9
_UpperCAmelCase = 2_2_0_8
_UpperCAmelCase = 2_4_1_8
_UpperCAmelCase = 2_3_2_3
_UpperCAmelCase = 2_4_0_7
# @@protoc_insertion_point(module_scope)
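# Hedged note (added): the `_USE_C_DESCRIPTORS` block above only applies when
# the pure-Python protobuf backend is active; with C descriptors the serialized
# options are baked into the generated descriptors and cannot be mutated here.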
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class _UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri,
            base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)] )
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
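        # Hedged note (added): the `results` dict acts as a per-framework
        # regression guard; train_runtime is an upper bound, every logged
        # eval_accuracy must clear the floor, and every eval_loss must stay
        # under the cap.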
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
@property
    def atol_for_validation(self) -> float:
return 1e-5
@property
    def default_onnx_opset(self) -> int:
        return 12
def UpperCAmelCase__ (self : Optional[int] , A__ : "ProcessorMixin" , A__ : int = -1 , A__ : int = -1 , A__ : bool = False , A__ : Optional["TensorType"] = None , A__ : int = 3 , A__ : int = 4_0 , A__ : int = 4_0 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , "apply_ocr" , A__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase = compute_effective_axis_dimension(
A__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase = processor.tokenizer.num_special_tokens_to_add(A__ )
lowercase = compute_effective_axis_dimension(
A__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A__ )
# Generate dummy inputs according to compute batch and sequence
lowercase = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowercase = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase = self._generate_dummy_images(A__ , A__ , A__ , A__ )
lowercase = dict(
processor(
A__ , text=A__ , boxes=A__ , return_tensors=A__ , ) )
return inputs
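# Hedged note (added): swapping a dynamic (-1) axis for a small fixed size via
# `compute_effective_axis_dimension` keeps the ONNX exporter from specializing
# the graph to a single sample; the dummy boxes use one [48, 84, 73, 128]
# region per batch element.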
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
a_ : Optional[int] = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
a_ : Any = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
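# Hedged example (added comment): under the mapping above, an OpenCLIP key like
#   "text_branch.sequential.3.weight"
# first becomes "text_model.sequential.3.weight" via KEYS_TO_MODIFY_MAPPING and
# is then rewritten to "text_model.layers.1.linear.weight" (3 // 3 == 1).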
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
a_ : Union[str, Any] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
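    # Hedged sanity check (added, assuming the rewrite above): stooge sort
    # recursively sorts the first 2/3, the last 2/3, then the first 2/3 again,
    # giving roughly O(n^2.7) comparisons.
    print(stooge_sort([18, 1, 0, -7, -1, 2]))  # expected: [-7, -1, 0, 1, 2, 18]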
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        return 1e-4
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
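    # Hedged usage sketch (added): `join` mirrors str.join but also strips the
    # separator from both ends of the result.
    print(join("-", ["apple", "banana", "cherry"]))  # expected: apple-banana-cherry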
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A__ ( __lowerCamelCase = "laptop" ):
SCREAMING_SNAKE_CASE_ = F'''https://www.amazon.in/laptop/s?k={product}'''
SCREAMING_SNAKE_CASE_ = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''', attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''}, ), soup.find_all('''div''', attrs={'''class''': '''a-row a-size-base a-color-base'''} ), ):
try:
SCREAMING_SNAKE_CASE_ = item.ha.text
SCREAMING_SNAKE_CASE_ = '''https://www.amazon.in/''' + item.ha.a['''href''']
SCREAMING_SNAKE_CASE_ = item.find('''span''', attrs={'''class''': '''a-offscreen'''} ).text
try:
SCREAMING_SNAKE_CASE_ = item.find('''span''', attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
SCREAMING_SNAKE_CASE_ = '''Not available'''
try:
SCREAMING_SNAKE_CASE_ = (
'''₹'''
+ item.find(
'''span''', attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
SCREAMING_SNAKE_CASE_ = ''''''
try:
SCREAMING_SNAKE_CASE_ = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''', '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''', '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''', '''''' ) )
)
* 1_00 )
except ValueError:
SCREAMING_SNAKE_CASE_ = float('''nan''' )
except AttributeError:
pass
SCREAMING_SNAKE_CASE_ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
SCREAMING_SNAKE_CASE_ = ''' '''
SCREAMING_SNAKE_CASE_ = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__UpperCAmelCase = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
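# Editorial note: the "Discount" column is just (MRP - price) / MRP * 100 on the
# rupee amounts after stripping "₹" and thousands separators; e.g. MRP ₹1,999 and
# price ₹1,499 give (1999 - 1499) / 1999 * 100 ≈ 25.01%. Scraping Amazon is
# brittle (markup and anti-bot behavior change), so treat this as an
# illustration rather than a stable API.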
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
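# Optional sanity check (sketch): compare the R^2 of a plain linear fit against
# the degree-4 polynomial fit to see what the extra features buy here.
#
#     lin_reg = LinearRegression().fit(X, y)
#     print(lin_reg.score(X, y), pol_reg.score(X_poly, y))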
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
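# The tester below builds a tiny ConvNextV2 config and random inputs so the
# shape assertions in the unit tests run quickly on CPU.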
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
def __lowercase ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : Dict ):
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def __lowercase ( self : Dict ):
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def __lowercase ( self : Optional[Any] ):
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def __lowercase ( self : Dict ):
pass
def __lowercase ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase = True
if model_class.__name__ in [
*get_values(lowerCAmelCase ),
*get_values(lowerCAmelCase ),
]:
continue
lowerCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
lowerCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
lowerCAmelCase = model(**lowerCAmelCase ).loss
loss.backward()
def __lowercase ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase = False
lowerCAmelCase = True
if (
model_class.__name__
in [*get_values(lowerCAmelCase ), *get_values(lowerCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
lowerCAmelCase = model(**lowerCAmelCase ).loss
loss.backward()
def __lowercase ( self : Dict ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(lowerCAmelCase )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def __lowercase ( self : str ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def __lowercase ( self : Union[str, Any] ):
def check_hidden_states_output(lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
lowerCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : int ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def __lowercase ( self : Any ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ConvNextVaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowercase () -> List[str]:
'''simple docstring'''
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : int ):
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def __lowercase ( self : int ):
lowerCAmelCase = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(lowerCAmelCase )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = preprocessor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**lowerCAmelCase )
# verify the logits
lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
lowerCAmelCase = torch.tensor([0.9996, 0.1966, -0.4386] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
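# These tests are normally collected by pytest from the transformers repository
# root, e.g. (sketch; the path assumes the upstream layout):
#
#     pytest tests/models/convnextv2/test_modeling_convnextv2.py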
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """
    Monte Carlo estimate of pi: draw points uniformly in the square
    [-1, 1] x [-1, 1]; a point lands inside the unit circle with
    probability pi / 4, so four times the observed proportion estimates pi.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: the mean of sampled values times the interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator on y=x, whose exact integral is (b^2 - a^2) / 2."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """The area under a quarter circle of radius 2 is pi, so integrating
    sqrt(4 - x^2) over [0, 2] gives another pi estimate."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
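# Quick usage sketch (results are random, numbers illustrative):
#
#     pi_estimator(100_000)                         # estimate near 3.14
#     area_under_line_estimator_check(100_000)      # area under y=x on [0, 1] -> ~0.5
#     pi_estimator_using_area_under_curve(100_000)  # quarter-circle integral -> ~pi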
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
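# Fine-tunes a seq2seq model with PyTorch Lightning for summarization or
# translation; SummarizationModule below wires together data loading, loss
# computation, generation-based validation metrics and the CLI flags.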
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _A ( self : List[Any] , __lowerCamelCase : Dict[str, torch.Tensor] ):
UpperCamelCase :List[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(__lowerCamelCase , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
UpperCamelCase :Dict = True
return readable_batch
def _A ( self : Union[str, Any] , __lowerCamelCase : Tuple , **__lowerCamelCase : List[str] ):
return self.model(__lowerCamelCase , **__lowerCamelCase )
def _A ( self : List[Any] , __lowerCamelCase : List[int] ):
UpperCamelCase :Optional[Any] = self.tokenizer.batch_decode(
__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
return lmap(str.strip , __lowerCamelCase )
def _A ( self : List[Any] , __lowerCamelCase : dict ):
UpperCamelCase :Any = self.tokenizer.pad_token_id
UpperCamelCase , UpperCamelCase :Dict = batch["""input_ids"""], batch["""attention_mask"""]
UpperCamelCase :int = batch["""labels"""]
if isinstance(self.model , __lowerCamelCase ):
UpperCamelCase :List[str] = self.model._shift_right(__lowerCamelCase )
else:
UpperCamelCase :Optional[int] = shift_tokens_right(__lowerCamelCase , __lowerCamelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCamelCase :Any = decoder_input_ids
self.save_readable_batch(__lowerCamelCase )
UpperCamelCase :int = self(__lowerCamelCase , attention_mask=__lowerCamelCase , decoder_input_ids=__lowerCamelCase , use_cache=__lowerCamelCase )
UpperCamelCase :Any = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCamelCase :Dict = nn.CrossEntropyLoss(ignore_index=__lowerCamelCase )
assert lm_logits.shape[-1] == self.vocab_size
UpperCamelCase :Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
UpperCamelCase :Optional[int] = nn.functional.log_softmax(__lowerCamelCase , dim=-1 )
UpperCamelCase , UpperCamelCase :str = label_smoothed_nll_loss(
__lowerCamelCase , __lowerCamelCase , self.hparams.label_smoothing , ignore_index=__lowerCamelCase )
return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
def _A ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict ):
return self._generative_step(__lowerCamelCase )
def _A ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict="val" ):
self.step_count += 1
UpperCamelCase :Any = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCamelCase :List[str] = losses["""loss"""]
UpperCamelCase :Optional[int] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
UpperCamelCase :List[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCamelCase :torch.FloatTensor = torch.tensor(__lowerCamelCase ).type_as(__lowerCamelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__lowerCamelCase )
UpperCamelCase :Optional[Any] = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
UpperCamelCase :Tuple = self.step_count
self.metrics[prefix].append(__lowerCamelCase ) # callback writes this to self.metrics_save_path
UpperCamelCase :List[str] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"""{prefix}_loss""": loss,
F"""{prefix}_{self.val_metric}""": metric_tensor,
}
def _A ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ):
return calculate_rouge(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int , __lowerCamelCase : dict ):
UpperCamelCase :Tuple = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCamelCase :Any = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=__lowerCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCamelCase :Dict = (time.time() - ta) / batch["""input_ids"""].shape[0]
UpperCamelCase :List[str] = self.ids_to_clean_text(__lowerCamelCase )
UpperCamelCase :List[str] = self.ids_to_clean_text(batch["""labels"""] )
UpperCamelCase :List[str] = self._step(__lowerCamelCase )
UpperCamelCase :List[str] = dict(zip(self.loss_names , __lowerCamelCase ) )
UpperCamelCase :Dict = self.calc_generative_metrics(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = np.mean(lmap(__lowerCamelCase , __lowerCamelCase ) )
base_metrics.update(gen_time=__lowerCamelCase , gen_len=__lowerCamelCase , preds=__lowerCamelCase , target=__lowerCamelCase , **__lowerCamelCase )
return base_metrics
def _A ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
return self._generative_step(__lowerCamelCase )
def _A ( self : Optional[int] , __lowerCamelCase : Dict ):
return self.validation_epoch_end(__lowerCamelCase , prefix="""test""" )
def _A ( self : Optional[int] , __lowerCamelCase : Any ):
UpperCamelCase :List[Any] = self.n_obs[type_path]
UpperCamelCase :int = self.target_lens[type_path]
UpperCamelCase :Union[str, Any] = self.dataset_class(
self.tokenizer , type_path=__lowerCamelCase , n_obs=__lowerCamelCase , max_target_length=__lowerCamelCase , **self.dataset_kwargs , )
return dataset
def _A ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : bool = False ):
UpperCamelCase :Optional[int] = self.get_dataset(__lowerCamelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCamelCase :str = dataset.make_sortish_sampler(__lowerCamelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
__lowerCamelCase , batch_size=__lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCamelCase , num_workers=self.num_workers , sampler=__lowerCamelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCamelCase :Dict = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__lowerCamelCase , batch_sampler=__lowerCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__lowerCamelCase , batch_size=__lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCamelCase , num_workers=self.num_workers , sampler=__lowerCamelCase , )
def _A ( self : Any ):
UpperCamelCase :int = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=__lowerCamelCase )
return dataloader
def _A ( self : Any ):
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def _A ( self : Any ):
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
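# Typical invocation (sketch; flag values are illustrative and assume the
# accompanying lightning_base/utils modules from this example directory):
#
#     python finetune.py --data_dir=./cnn_dm --model_name_or_path=t5-small \
#         --output_dir=./out --gpus=1 --do_train --n_val=500 --task=summarization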
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of explored vertices.

    >>> G = {"A": ["B", "C"], "B": ["A"], "C": ["A"]}
    >>> sorted(depth_first_search(G, "A"))
    ['A', 'B', 'C']
    """
    explored, stack = {start}, [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
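# For comparison, a recursive variant (editorial sketch, not part of the
# original module); it visits the same vertices via the call stack instead of
# an explicit one.
def depth_first_search_recursive(graph: dict, v: str, explored: set[str] | None = None) -> set[str]:
    explored = explored if explored is not None else set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored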
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration for a ViT-MSN model."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
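# Usage sketch (illustrative values): smaller variants can be declared directly.
#
#     config = ViTMSNConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
#     print(config.model_type)  # -> vit_msn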
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : list[int] ):
'''simple docstring'''
if not numbers:
return 0
if not isinstance(__UpperCamelCase , (list, tuple) ) or not all(
isinstance(__UpperCamelCase , __UpperCamelCase ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
snake_case_ : List[Any] = numbers[0]
for i in range(1 , len(__UpperCamelCase ) ):
# update the maximum and minimum subarray products
snake_case_ : Dict = numbers[i]
if number < 0:
snake_case_ , snake_case_ : Optional[int] = min_till_now, max_till_now
snake_case_ : List[str] = max(__UpperCamelCase , max_till_now * number )
snake_case_ : Any = min(__UpperCamelCase , min_till_now * number )
# update the maximum product found till now
snake_case_ : Tuple = max(__UpperCamelCase , __UpperCamelCase )
return max_prod
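# Run the doctests above when this file is executed directly, mirroring the
# other scripts in this collection.
if __name__ == "__main__":
    import doctest

    doctest.testmod()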
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
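# Tests below cover the Flax RoFormer port: a tester that fabricates small
# configs/inputs, the common-mixin test class, and slow integration checks.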
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
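# Editorial note: the lazy-module pattern above defers importing the torch-heavy
# modeling file until one of its attributes is first accessed, e.g. (sketch):
#
#     from transformers.models.swinv2 import Swinv2Config  # cheap, no torch yet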
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
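# Instantiating the deprecated class still works but emits a FutureWarning
# (sketch):
#
#     extractor = MobileViTFeatureExtractor()  # warns: use MobileViTImageProcessor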
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
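# Tests below exercise Falcon end to end: a tester that fabricates tiny
# configs/inputs, shape checks for each head, cache-format checks, and slow
# generation tests against real checkpoints.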
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_cache_conversions(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Optional[int] = 3
__lowerCamelCase : Optional[int] = """multi_label_classification"""
__lowerCamelCase : Optional[int] = input_dict["""input_ids"""]
__lowerCamelCase : Any = input_ids.ne(1 ).to(_lowerCamelCase )
__lowerCamelCase : Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCamelCase : Tuple = FalconForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowerCamelCase : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self : Any ):
'''simple docstring'''
for model_class in self.all_generative_model_classes:
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_lowerCamelCase , """use_cache""" ):
return
__lowerCamelCase : Tuple = model_class(_lowerCamelCase ).to(_lowerCamelCase )
if "use_cache" not in inputs:
__lowerCamelCase : Optional[int] = True
__lowerCamelCase : Any = model(**_lowerCamelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__lowerCamelCase : Tuple = (
getattr(_lowerCamelCase , """decoder_layers""" , _lowerCamelCase )
or getattr(_lowerCamelCase , """num_decoder_layers""" , _lowerCamelCase )
or config.num_hidden_layers
)
__lowerCamelCase : str = getattr(_lowerCamelCase , """num_kv_heads""" , config.num_attention_heads )
__lowerCamelCase : Dict = getattr(_lowerCamelCase , """d_model""" , config.hidden_size )
__lowerCamelCase : Any = embed_dim // num_attention_heads
__lowerCamelCase : int = outputs["""past_key_values"""]
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = inputs["""input_ids"""].shape
for i in range(_lowerCamelCase ):
if config.new_decoder_architecture:
__lowerCamelCase : Any = config.num_attention_heads
elif config.multi_query:
__lowerCamelCase : int = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 458
|
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 1 Jan 1901 was a Tuesday, so the first Sunday of that month was the 6th
    month = 1
    year = 1_901
    sundays = 0

    while year < 2_001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2_001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 458
| 1
|
"""simple docstring"""
from __future__ import annotations
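# "Next greatest element": for every position in `arr`, report the first strictly
# larger value to its right, or -1 if there is none. Three implementations of
# increasing efficiency follow; `expect` is the reference answer for `arr`.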
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Naive O(n^2) version: for each element, scan the rest of the list by index."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but iterates with enumerate and slicing instead of indexing."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack version: scan right to left, popping stack entries that
    are not greater than the current element; the remaining top is the answer."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
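    # timeit's `setup` string imports the test array and all three implementations
    # into the timing namespace so each variant is measured on identical input.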
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 83
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    r"""Constructs a LeViT-style image processor (resize, center crop, rescale, normalize)."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : str , ) -> BatchFeature:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 53
| 0
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    # Old config keys on the left are renamed to the current diffusers names on the right.
    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 720
|
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
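# Fill-mask pipeline: predicts the token(s) behind the tokenizer's mask token,
# optionally restricting the candidates to user-supplied target words.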
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ", )
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        # numel == 0 means no mask token was found in this sample
        if numel < 1:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, f"No mask_token ({self.tokenizer.mask_token}) found on the input"
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ : Optional[int] = target_ids.shape[0]
UpperCAmelCase_ : Union[str, Any] = model_outputs['input_ids'][0]
UpperCAmelCase_ : Optional[Any] = model_outputs['logits']
if self.framework == "tf":
UpperCAmelCase_ : Optional[int] = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
UpperCAmelCase_ : Tuple = outputs.numpy()
UpperCAmelCase_ : Dict = outputs[0, masked_index, :]
UpperCAmelCase_ : List[str] = stable_softmax(_snake_case , axis=-1)
if target_ids is not None:
UpperCAmelCase_ : str = tf.gather_nd(tf.squeeze(_snake_case , 0) , target_ids.reshape(-1 , 1))
UpperCAmelCase_ : str = tf.expand_dims(_snake_case , 0)
UpperCAmelCase_ : int = tf.math.top_k(_snake_case , k=_snake_case)
UpperCAmelCase_ , UpperCAmelCase_ : Dict = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ : int = outputs[0, masked_index, :]
UpperCAmelCase_ : str = logits.softmax(dim=-1)
if target_ids is not None:
UpperCAmelCase_ : List[str] = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = probs.topk(_snake_case)
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : str = values.shape[0] == 1
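        # With a single mask token in the input, the pipeline returns a flat list of
        # predictions rather than one list per mask.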
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
UpperCAmelCase_ : Union[str, Any] = []
for v, p in zip(_values , _predictions):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ : Union[str, Any] = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ : str = target_ids[p].tolist()
UpperCAmelCase_ : Union[str, Any] = p
# Filter padding out:
UpperCAmelCase_ : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase_ : Union[str, Any] = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ : Tuple = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p]), 'sequence': sequence}
row.append(_snake_case)
result.append(_snake_case)
if single_mask:
return result[0]
return result
    def get_target_ids(self, targets, top_k=None):
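        # Resolve user-supplied target words to vocabulary ids; out-of-vocab targets
        # are tokenized and only the first resulting sub-token is kept (with a warning).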
if isinstance(_snake_case , _snake_case):
UpperCAmelCase_ : List[str] = [targets]
try:
UpperCAmelCase_ : Optional[int] = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : List[Any] = []
for target in targets:
UpperCAmelCase_ : Optional[int] = vocab.get(_snake_case , _snake_case)
if id_ is None:
UpperCAmelCase_ : int = self.tokenizer(
_snake_case , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , max_length=1 , truncation=_snake_case , )['input_ids']
if len(_snake_case) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'We cannot replace it with anything meaningful, ignoring it')
continue
UpperCAmelCase_ : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""")
target_ids.append(id_)
UpperCAmelCase_ : Union[str, Any] = list(set(_snake_case))
if len(_snake_case) == 0:
raise ValueError('At least one target must be provided when passed.')
UpperCAmelCase_ : Dict = np.array(_snake_case)
return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
| 471
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
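# Model classes are registered below only when their backend (torch / TF / flax) is
# importable; _LazyModule then defers the real imports to first attribute access.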
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 235
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2_048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44_100, hop_length_to_sampling_rate=86, n_fft=2_048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        # Shift by -20 dB, rescale by 1/40 and clip so the log-mel values land in [-1, 1].
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
def __call__( self : List[Any] , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = True , a : Optional[int] = None , a : bool = False , a : bool = False , **a : Union[str, Any] , )-> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase__ = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
lowercase__ = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , a ):
lowercase__ = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(a ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(a ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
lowercase__ = {'audio_values': padded_audio_features}
lowercase__ = BatchFeature(data=a , tensor_type=a )
return encoded_inputs
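    # Illustrative usage (hypothetical values): feeding one second of silence at 44.1 kHz,
    #   feats = TvltFeatureExtractor()(np.zeros(44_100, dtype=np.float32), sampling_rate=44_100, return_attention_mask=True)
    # yields "audio_values" padded to whole patches plus a matching "audio_mask".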
| 235
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
    'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm'] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm'] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 608
|
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # Normalize both embedding matrices and return the pairwise cosine-similarity matrix.
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        # The flax CLIP vision module expects NHWC inputs.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 608
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_,'do_resize' ) )
self.assertTrue(hasattr(A_,'size' ) )
self.assertTrue(hasattr(A_,'apply_ocr' ) )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{'height': 18, 'width': 18} )
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict,size=42 )
self.assertEqual(image_processor.size,{'height': 42, 'width': 42} )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_,Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
self.assertIsInstance(encoding.words,A_ )
self.assertIsInstance(encoding.boxes,A_ )
# Test batched
__UpperCamelCase = image_processing(A_,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_,numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_,np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
# Test batched
__UpperCamelCase = image_processing(A_,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_,torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_,torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
# Test batched
__UpperCamelCase = image_processing(A_,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_docvqa',split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase = image_processing(A_,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ),len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words,A_ )
self.assertListEqual(encoding.boxes,A_ )
# with apply_OCR = False
__UpperCamelCase = LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase = image_processing(A_,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape,(1, 3, 224, 224) )
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
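# CvT is a three-stage architecture: every per-stage hyperparameter in the config
# below is a list with one entry per stage.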
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[6_4, 1_9_2, 3_8_4], num_heads=[1, 3, 6], depth=[1, 2, 1_0], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.0_2, layer_norm_eps=1e-12, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 414
| 0
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
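# jiwer < 2.3.0 lacks the built-in character-level transforms, so an equivalent
# sentence-to-characters transform is defined by hand for old versions below.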
SENTENCE_DELIMITER = ''

if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ], )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform, )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform, )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 711
|
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
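# Textbook RSA: pick large primes p and q, set n = p * q, choose e coprime to
# (p - 1) * (q - 1), and derive d as the modular inverse of e. For distinct primes
# p and q, (p - 1) * (q - 1) is Euler's totient of n.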
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1_0_2_4)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
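
# Why the key pair works: with n = p * q and d the inverse of e modulo
# (p - 1) * (q - 1), pow(pow(m, e, n), d, n) == m for any message m < n.
# A toy round-trip with textbook-sized primes (far too small for real use):
if __name__ == "__main__":
    _p, _q = 61, 53
    _n = _p * _q                           # 3233
    _e = 17                                # coprime to (p - 1) * (q - 1) = 3120
    _d = pow(_e, -1, (_p - 1) * (_q - 1))  # 2753 (Python 3.8+ modular inverse)
    _c = pow(65, _e, _n)                   # encrypt m = 65
    assert pow(_c, _d, _n) == 65           # decrypt recovers m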
| 206
| 0
|
'''simple docstring'''
import os
def solution() -> int:
    """Find the greatest product of four adjacent numbers, in any direction,
    in the 20x20 grid stored in grid.txt next to this script."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right (a run of four starting at column j needs j + 3 <= 19, hence range(17))
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum
if __name__ == "__main__":
print(solution())
| 649
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
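
# Manual spin of the tester outside unittest (a sketch; assumes flax is installed,
# and `parent` is unused by these helpers, so None is fine here):
if __name__ == "__main__":
    tester = FlaxRobertaModelTester(parent=None)
    config, input_ids, token_type_ids, attention_mask = tester.prepare_config_and_inputs()
    print(input_ids.shape)  # (13, 7): batch_size x seq_length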
| 649
| 1
|
"""simple docstring"""
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.
    >>> roman_to_int("XLII")
    42
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.
    >>> int_to_roman(42)
    'XLII'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714
|
"""simple docstring"""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
class TFMT5Model(TFT5Model):
    model_type = """mt5"""
    config_class = MT5Config
class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = """mt5"""
    config_class = MT5Config
class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = """mt5"""
    config_class = MT5Config
| 227
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
    """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = 'luke'
    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        '''Constructs LukeConfig.'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
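
# Sketch: instantiating a scaled-down configuration (hypothetical sizes; every
# other field keeps its default from the signature above):
if __name__ == "__main__":
    _config = LukeConfig(vocab_size=1000, entity_vocab_size=100, hidden_size=64, entity_emb_size=32)
    print(_config.model_type)                  # "luke"
    print(_config.use_entity_aware_attention)  # True by default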
| 92
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (four channels in, three channels out after RGB conversion)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
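
# Outside unittest, the behaviour these tests pin down looks like this
# (a sketch; assumes torch and the vision dependencies are installed):
if __name__ == "__main__":
    _processor = ChineseCLIPImageProcessor(do_center_crop=True, crop_size={"height": 18, "width": 18})
    _image = Image.fromarray(np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8))
    _pixel_values = _processor(_image, return_tensors="pt").pixel_values
    print(_pixel_values.shape)  # torch.Size([1, 3, 18, 18])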
| 290
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model and processor to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
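
# Example invocation (script and output folder names are illustrative, not
# prescribed by the code above):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-mlm-itm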
| 721
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    """Return the central binomial coefficient C(2n, n), i.e. the number of
    monotonic lattice paths through an n x n grid (Project Euler problem 15)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
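
# Worked example: solution(2) computes C(4, 2) = 4! / (2! * 2!) = 6, the six
# monotonic lattice paths through a 2x2 grid.
assert solution(2) == 6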
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 120
| 0
|
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
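
# Usage sketch (hypothetical sizes): a 1000-token vocabulary split into a head
# of 100 frequent tokens plus two tail clusters at the cutoffs [100, 500].
if __name__ == "__main__":
    softmax = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
    hidden = torch.randn(4, 7, 32)           # (batch, seq_len, d_proj)
    labels = torch.randint(0, 1000, (4, 7))  # next-token targets
    nll = softmax(hidden, labels)            # forward shifts labels by one position
    print(nll.shape)                         # torch.Size([24]) = 4 * (7 - 1)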
| 71
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32)
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False)
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs).text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs).text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 71
| 1
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
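
# Downstream, these guarded re-exports resolve to the real classes whenever the
# optional dependency is present; a minimal sketch with PyTorch installed:
if __name__ == "__main__":
    from diffusers.schedulers import DDIMScheduler

    scheduler = DDIMScheduler()    # default config
    scheduler.set_timesteps(50)    # choose 50 inference steps
    print(scheduler.timesteps[:5])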
| 354
|
from jiwer import compute_measures
import datasets
SCREAMING_SNAKE_CASE : Tuple = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
SCREAMING_SNAKE_CASE : Optional[Any] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
SCREAMING_SNAKE_CASE : Any = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                # jiwer's compute_measures expects (truth, hypothesis)
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
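

# For intuition, a dependency-free sketch of the WER formula from the docstring above:
# word-level Levenshtein distance (S + D + I) divided by the reference length N.
# This is illustrative only, not a replacement for jiwer's compute_measures.
def simple_wer(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # i deletions
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # j insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1  # substitution cost
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[len(ref)][len(hyp)] / len(ref)


assert simple_wer("this is the reference", "this is the prediction") == 0.25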
| 354
| 1
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_UpperCamelCase = """."""
if __name__ == "__main__":
_UpperCamelCase = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
_UpperCamelCase = []
_UpperCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
_UpperCamelCase = line.strip()
_UpperCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_UpperCamelCase = """\n""".join(non_existent_paths)
raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 453
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
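

# Sketch of the inference flow the integration tests above exercise, assuming the
# `apple/mobilevitv2-1.0-imagenet1k-256` checkpoint and an installed `transformers`:
# import requests
# import torch
# from PIL import Image
# from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification
#
# url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # the COCO cats image
# image = Image.open(requests.get(url, stream=True).raw)
#
# processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
# model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#
# with torch.no_grad():
#     logits = model(**processor(images=image, return_tensors="pt")).logits
# print(model.config.id2label[logits.argmax(-1).item()])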
| 453
| 1
|
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow.
    'vectors' should be an n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k.
    'noofclusters' should be an integer.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
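

# The same expectation-maximization loop without the TF1 session machinery; a small
# NumPy-only sketch for comparison (illustrative, not part of the original module):
import numpy as np


def kmeans_numpy(vectors: np.ndarray, k: int, iterations: int = 100, seed: int = 0):
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)].astype(float)
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # Expectation: assign each vector to its nearest centroid
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # Maximization: move each centroid to the mean of its assigned vectors
        for cluster in range(k):
            members = vectors[assignments == cluster]
            if len(members):
                centroids[cluster] = members.mean(axis=0)
    return centroids, assignments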
| 643
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
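
# Typical usage of the pipeline exported here (a sketch; assumes `torch`,
# `transformers`, a CUDA device and the "openai/shap-e" checkpoint are available,
# and the parameter values are illustrative):
# import torch
# from diffusers import ShapEPipeline
#
# pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
# images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images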
| 643
| 1
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        """Sum allocated resources per resource column of the claim vector"""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Available resources = claim vector minus currently allocated resources"""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self):
        """Remaining need per process = maximum claim minus allocated resources"""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        """Map each need list back to its original process index"""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs):
        """Utilize various methods in this class to simulate the Banker's algorithm"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's data"""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
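
# Example run using the module's test data (output abbreviated); `describe=True`
# triggers the pretty-printed resource tables before the simulation starts:
# BankersAlgorithm(
#     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
# ).main(describe=True)
# -> prints the tables, then lines like "Process 3 is executing." followed by
#    "The process is in a safe state." once every process has run.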
| 248
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """Construct an XLNet tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
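

# Typical usage (downloads the pretrained SentencePiece model on first call):
# from transformers import XLNetTokenizer
#
# tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
# tokenizer.tokenize("Hello, world!")   # SentencePiece pieces, e.g. ['▁Hello', ',', '▁world', '!']
# tokenizer.encode("Hello, world!")     # ids ending with <sep> and <cls>, per the methods above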
| 506
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation with ControlNet conditioning using Kandinsky 2.2.

    Args:
        unet ([`UNet2DConditionModel`]): Conditional U-Net to denoise the image embedding.
        scheduler ([`DDPMScheduler`]): Scheduler used with `unet` to generate image latents.
        movq ([`VQModel`]): MoVQ decoder that turns latents into images.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
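

# Worked example of the latent sizing used in __call__ above: with the MoVQ scale
# factor of 8, a 768x768 request maps to 768 // 8**2 = 12 latent cells per side,
# i.e. a 96x96 latent grid; non-multiples round up to the next cell:
assert downscale_height_and_width(768, 768, scale_factor=8) == (96, 96)
assert downscale_height_and_width(770, 770, scale_factor=8) == (104, 104)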
| 718
|
"""Speech2Text2 model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
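

# Instantiating the default configuration and reading attributes through attribute_map:
# config = Speech2Text2Config()
# config.vocab_size            # 10000
# config.hidden_size           # 256, resolved to d_model via attribute_map
# config.num_attention_heads   # 4, resolved to decoder_attention_heads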
| 570
| 0
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
    decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
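

# Shape walkthrough with dummy tensors (the dims are illustrative, chosen so the
# arithmetic is easy to follow; not part of the original module):
# model = UnCLIPTextProjModel(
#     clip_extra_context_tokens=4, clip_embeddings_dim=768, time_embed_dim=1536, cross_attention_dim=2048
# )
# hidden, time_emb = model(
#     image_embeddings=torch.randn(2, 768),
#     prompt_embeds=torch.randn(2, 768),
#     text_encoder_hidden_states=torch.randn(2, 77, 768),
#     do_classifier_free_guidance=False,
# )
# hidden.shape    # torch.Size([2, 81, 2048]): 4 extra context tokens + 77 text tokens
# time_emb.shape  # torch.Size([2, 1536])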
| 183
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem protocol prefix (e.g. "s3://") from `dataset_path`."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether `fs` points to a remote filesystem (anything other than the local "file" protocol)."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear reference to the loop and thread, otherwise HTTPFileSystem hangs in the ML training loop.
    See https://github.com/fsspec/gcsfs/issues/379
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
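

# The helpers above in action (local-only calls, no remote filesystem required):
# extract_path_from_uri("s3://my-bucket/data/train.parquet")  # -> "my-bucket/data/train.parquet"
# extract_path_from_uri("/local/path/train.parquet")          # -> unchanged
# is_remote_filesystem(fsspec.filesystem("file"))             # -> False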
| 183
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
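
# Minimal sketch of the lazy-import idea behind _LazyModule, using PEP 562
# module-level __getattr__ (illustrative; not transformers' actual implementation):
# import importlib
#
# _SUBMODULES = {"PoolFormerConfig": "configuration_poolformer"}
#
# def __getattr__(name):
#     if name in _SUBMODULES:
#         module = importlib.import_module(f".{_SUBMODULES[name]}", __name__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")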
| 178
|
""" BEiT model configuration"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
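

# Quick check of the defaults and the dynamic ONNX axes declared above:
# config = BeitConfig()
# config.hidden_size, config.num_hidden_layers   # (768, 12)
# onnx_config = BeitOnnxConfig(config)
# dict(onnx_config.inputs)
# # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}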
| 178
| 1
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably, which allows a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
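

# Round-trip example (the classic Wikipedia key and plaintext; outputs shown for
# reference, repeated letters are padded with X by prepare_input):
# key = "playfair example"
# secret = encode("hide the gold in the tree stump", key)  # e.g. "BMODZBXDNABEKUDMUIXMMOUVIF"
# decode(secret, key)                                      # "HIDETHEGOLDINTHETREXESTUMP"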
| 290
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
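
# Hedged usage sketch (not in the original file):
# config = BloomConfig(n_layer=2, n_head=8)
# onnx_config = BloomOnnxConfig(config, task="default")
# list(onnx_config.inputs)  # -> ["input_ids", "attention_mask"]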
| 290
| 1
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sorts a sequence containing only the values 0, 1 and 2 in a single pass.

    >>> dutch_national_flag_sort([2, 0, 1, 0, 2, 1])
    [0, 0, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
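
# A further usage sketch (not in the original): only values matching the `colors`
# constants are valid inputs; anything else raises ValueError.
# dutch_national_flag_sort([0, 2, 2, 1])  # -> [0, 1, 2, 2]
# dutch_national_flag_sort([0, 3])        # -> ValueError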
| 224
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
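
# Note (not in the original file): ViTMAE's forward pass samples a random mask, which is
# why the tests above fix np.random / torch.manual_seed seeds or pass an explicit `noise`
# tensor before comparing outputs.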
| 224
| 1
|
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase = 4_00_00_00 ) -> int:
lowerCamelCase_ = [0, 1]
lowerCamelCase_ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase_ = 0
for j in range(len(__UpperCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        # five identical per-image results (written compactly with `* 5`; equivalent to
        # listing the sublist five times, as the scores are equal and only labels vary)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
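
# Minimal usage sketch of the pipeline under test (hedged example, not part of the
# original file; assumes transformers is installed and "cats.png" exists on disk):
# from transformers import pipeline
# classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# print(classifier("cats.png", candidate_labels=["cat", "plane", "remote"]))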
| 678
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
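
# Hedged usage sketch (not in the original file): the defaults mirror the CvT-13 layout,
# three stages with depths [1, 2, 10] and embedding dims [64, 192, 384]:
# config = CvtConfig()
# config.depth, config.embed_dim  # -> [1, 2, 10], [64, 192, 384]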
| 591
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
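
# Hedged CLI sketch (file paths are hypothetical):
# python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path ./unispeech_sat.pt \
#     --pytorch_dump_folder_path ./unispeech-sat-hf \
#     --not_finetuned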
| 591
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
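
# Note (not in the original file): _LazyModule makes `import transformers.models.mobilenet_v2`
# cheap; the heavy torch/vision submodules are only imported on first attribute access, e.g.
# `from transformers.models.mobilenet_v2 import MobileNetV2Config` loads just the config module.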
| 540
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCamelCase__ :
def __init__( self ,A = None ):
if components is None:
UpperCAmelCase = []
UpperCAmelCase = list(A )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(A ,self.__components ) ) + ")"
def __add__( self ,A ):
UpperCAmelCase = len(self )
if size == len(A ):
UpperCAmelCase = [self.__components[i] + other.component(A ) for i in range(A )]
return Vector(A )
else:
raise Exception("""must have the same size""" )
def __sub__( self ,A ):
UpperCAmelCase = len(self )
if size == len(A ):
UpperCAmelCase = [self.__components[i] - other.component(A ) for i in range(A )]
return Vector(A )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self ,A ):
...
@overload
def __mul__( self ,A ):
...
def __mul__( self ,A ):
if isinstance(A ,(float, int) ):
UpperCAmelCase = [c * other for c in self.__components]
return Vector(A )
elif isinstance(A ,A ) and len(self ) == len(A ):
UpperCAmelCase = len(self )
UpperCAmelCase = [self.__components[i] * other.component(A ) for i in range(A )]
return sum(A )
else: # error case
raise Exception("""invalid operand!""" )
def _UpperCamelCase ( self ):
return Vector(self.__components )
def _UpperCamelCase ( self ,A ):
if isinstance(A ,A ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def _UpperCamelCase ( self ,A ,A ):
assert -len(self.__components ) <= pos < len(self.__components )
UpperCAmelCase = value
def _UpperCamelCase ( self ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
UpperCAmelCase = [c**2 for c in self.__components]
return math.sqrt(sum(A ) )
def _UpperCamelCase ( self ,A ,A = False ):
UpperCAmelCase = self * other
UpperCAmelCase = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _a ( _snake_case ):
"""simple docstring"""
assert isinstance(_snake_case , _snake_case )
return Vector([0] * dimension )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
assert isinstance(_snake_case , _snake_case ) and (isinstance(_snake_case , _snake_case ))
UpperCAmelCase = [0] * dimension
UpperCAmelCase = 1
return Vector(_snake_case )
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
assert (
isinstance(_snake_case , _snake_case )
and isinstance(_snake_case , _snake_case )
and (isinstance(_snake_case , (int, float) ))
)
return x * scalar + y
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
random.seed(_snake_case )
UpperCAmelCase = [random.randint(_snake_case , _snake_case ) for _ in range(_snake_case )]
return Vector(_snake_case )
class lowerCamelCase__ :
def __init__( self ,A ,A ,A ):
UpperCAmelCase = matrix
UpperCAmelCase = w
UpperCAmelCase = h
def __str__( self ):
UpperCAmelCase = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self ,A ):
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase = []
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] + other.component(A ,A )
for j in range(self.__width )
]
matrix.append(A )
return Matrix(A ,self.__width ,self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self ,A ):
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase = []
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] - other.component(A ,A )
for j in range(self.__width )
]
matrix.append(A )
return Matrix(A ,self.__width ,self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self ,A ):
...
@overload
def __mul__( self ,A ):
...
def __mul__( self ,A ):
if isinstance(A ,A ): # matrix-vector
if len(A ) == self.__width:
UpperCAmelCase = zero_vector(self.__height )
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] * other.component(A )
for j in range(self.__width )
]
ans.change_component(A ,sum(A ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(A ,(int, float) ): # matrix-scalar
UpperCAmelCase = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A ,self.__width ,self.__height )
return None
def _UpperCamelCase ( self ):
return self.__height
def _UpperCamelCase ( self ):
return self.__width
def _UpperCamelCase ( self ,A ,A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def _UpperCamelCase ( self ,A ,A ,A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
UpperCAmelCase = value
else:
raise Exception("""change_component: indices out of bounds""" )
def _UpperCamelCase ( self ,A ,A ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
UpperCAmelCase = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A ) ):
UpperCAmelCase = minor[i][:y] + minor[i][y + 1 :]
return Matrix(A ,self.__width - 1 ,self.__height - 1 ).determinant()
def _UpperCamelCase ( self ,A ,A ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A ,A )
else:
raise Exception("""Indices out of bounds""" )
def _UpperCamelCase ( self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
UpperCAmelCase = [
self.__matrix[0][y] * self.cofactor(0 ,A ) for y in range(self.__width )
]
return sum(A )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = [[0] * n for _ in range(_snake_case )]
return Matrix(_snake_case , _snake_case , _snake_case )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
random.seed(_snake_case )
UpperCAmelCase = [
[random.randint(_snake_case , _snake_case ) for _ in range(_snake_case )] for _ in range(_snake_case )
]
return Matrix(_snake_case , _snake_case , _snake_case )
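
# Hedged usage sketch (not part of the original module):
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)  # (5,7,9)
    print(v * w)  # 32 -- the dot product
    identity = Matrix([[1, 0], [0, 1]], 2, 2)
    print(identity.determinant())  # 1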
| 341
| 0
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir, "cached_{}_{}_{}".format(
                mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
        parser.add_argument(
            "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam." )
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some." )
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer." )
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps." )
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader" )
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int )
        parser.add_argument("--train_batch_size", default=32, type=int )
        parser.add_argument("--eval_batch_size", default=32, type=int )
        parser.add_argument("--adafactor", action="store_true" )
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1)
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
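
# Hedged usage sketch (the task-module name is hypothetical): a concrete task subclasses
# BaseTransformer, implements get_dataloader(), and is launched via generic_train:
# parser = argparse.ArgumentParser()
# add_generic_args(parser, os.getcwd())
# MyTaskModule.add_model_specific_args(parser, os.getcwd())
# args = parser.parse_args()
# generic_train(MyTaskModule(args), args)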
| 360
|
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360
| 1
|
def solution(power: int = 1000) -> int:
    """Returns the sum of the digits of the number 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
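
# Worked check (not in the original): 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26,
# so solution(15) == 26.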
| 43
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up host/device memory after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self) -> None:
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
        # shard inputs and rng across devices
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
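        # Note: with jit=True the Flax pipeline call runs under jax.pmap, which is why
        # the params, RNG keys, and prepared inputs are replicated/sharded per device.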
| 43
| 1
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self : Dict ) -> List[str]:
UpperCAmelCase_ = '''resnet18'''
UpperCAmelCase_ = '''microsoft/resnet-18'''
UpperCAmelCase_ = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ )
UpperCAmelCase_ = AutoBackbone.from_pretrained(lowerCAmelCase_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCAmelCase_ = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ , out_indices=[1, 2, 3] )
UpperCAmelCase_ = AutoBackbone.from_pretrained(lowerCAmelCase_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
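    # Illustration of the equivalence above (values implied by the assertions):
    #   timm path:         out_indices == (-1,)
    #   transformers path: out_indices == [len(stage_names) - 1]
    # i.e. both resolve to the last stage unless explicit out_indices are passed.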
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def UpperCamelCase ( self : Optional[int] ) -> str:
UpperCAmelCase_, UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def UpperCamelCase ( self : int ) -> List[Any]:
UpperCAmelCase_, UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCAmelCase_ = self.all_model_classes[0]
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ = model(**lowerCAmelCase_ )
UpperCAmelCase_ = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCAmelCase_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCAmelCase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def UpperCamelCase ( self : List[Any] ) -> Dict:
UpperCAmelCase_, UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase_ )
UpperCAmelCase_ = False
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ = model(**lowerCAmelCase_ )
| 407
|
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
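# Truth table realized by and_gate (verified by test_and_gate above):
#   input_1 input_2 | output
#      0       0    |   0
#      0       1    |   0
#      1       0    |   0
#      1       1    |   1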
| 407
| 1
|
"""simple docstring"""
snake_case_ : str = {"""a""": ["""c""", """b"""], """b""": ["""d""", """e"""], """c""": [], """d""": [], """e""": []}
snake_case_ : Optional[int] = ["""a""", """b""", """c""", """d""", """e"""]
def lowercase_ ( _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : Any = start
# add current to visited
visited.append(_lowercase )
UpperCAmelCase : Optional[int] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCAmelCase : str = topological_sort(_lowercase , _lowercase , _lowercase )
# if all neighbors visited add current to sort
sort.append(_lowercase )
# if all vertices haven't been visited select a new one to visit
if len(_lowercase ) != len(_lowercase ):
for vertice in vertices:
if vertice not in visited:
UpperCAmelCase : Tuple = topological_sort(_lowercase , _lowercase , _lowercase )
# return sort
return sort
if __name__ == "__main__":
snake_case_ : Dict = topological_sort("""a""", [], [])
print(sort)
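# Expected output for the graph above (children are appended before their parents):
#   ['c', 'd', 'e', 'b', 'a']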
| 595
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )
        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )
        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
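        # Note (assumed behavior of the pipeline version this test targets):
        # `stop_sequence` is tokenized and its first token id is used as an extra
        # eos_token_id, which is why generation stops right after " fe" above.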
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)
        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)
            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
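        # Note: when both `max_length` and `max_new_tokens` are passed, `generate()` logs
        # the "Both `max_new_tokens`" warning checked above and `max_new_tokens` takes
        # precedence over `max_length`.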
| 595
| 1
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
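# Note: get_writer_batch_size caps the Parquet row-group size for image/audio features
# so that a single row group of large media rows stays memory-friendly; for plain
# scalar features it returns None, meaning the default writer batch size is used.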
| 235
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 235
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
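    # Note: GLPN resizes so that both spatial dimensions are multiples of size_divisor
    # (32 here); the `shape % size_divisor == 0` checks above assert exactly that contract.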
| 69
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and
    the `TYPE_CHECKING` objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
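# Usage sketch (assuming this module is saved as check_inits.py): running
# `python check_inits.py` from the repo root raises a ValueError describing any
# init half or unregistered submodule it finds inconsistent, and exits silently otherwise.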
| 541
| 0
|
"""simple docstring"""
import pytest
_lowerCAmelCase = """__dummy_dataset1__"""
_lowerCAmelCase = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
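# Usage sketch (hypothetical test): thanks to the fixture chain above, a test can
# simply request `dataset_loading_script_dir` and pass the returned path straight
# to `datasets.load_dataset(dataset_loading_script_dir)`.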
| 720
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    # The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embedding_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embedding_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")
        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        # Returns a dict of all attention processors used in the model, indexed by weight name.
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor):
        # Sets the attention processor(s) to use; accepts a single processor or a dict keyed by weight name.
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        # Disables custom attention processors and restores the default implementation.
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        '''simple docstring'''
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 16
| 0
|
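A minimal, self-contained sketch of the timestep normalization in the forward pass above; the function and variable names are my own, not the library's.

import torch

def broadcast_timesteps(timesteps, batch_size):
    # accept python ints, 0-d tensors, or 1-d tensors
    if not torch.is_tensor(timesteps):
        timesteps = torch.tensor([timesteps], dtype=torch.long)
    elif timesteps.ndim == 0:
        timesteps = timesteps[None]
    # multiplying by ones broadcasts to the batch in an ONNX/Core ML-friendly way
    return timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

print(broadcast_timesteps(10, 4))  # tensor([10, 10, 10, 10])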
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def snake_case ( self ):
'''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(
            generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 93
|
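A hedged usage sketch of the PNDM pipeline pattern exercised in the test above, assuming the public diffusers API; the configuration values mirror the dummy model and are illustrative.

import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel(
    block_out_channels=(32, 64), layers_per_block=2, sample_size=32,
    in_channels=3, out_channels=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
images = pipe(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images
print(images.shape)  # (1, 32, 32, 3)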
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size,
        out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels, id2label=idalabel, label2id=labelaid)
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''')
            in_proj_bias = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', file_name=model_name)[
        'state_dict'
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
# fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
# verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print('First values of logits:', logits[0, 0, :3, :3])
# assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(F'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F'''Pushing model and processor for {model_name} to hub''')
        model.push_to_hub(F'''openmmlab/{model_name}''')
        processor.push_to_hub(F'''openmmlab/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 425
| 0
|
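A small demonstration of the unfold reordering above on a toy tensor: Swin's patch-merging weights interleave four shifted feature maps, and the conversion swaps groups 1 and 2 back into row-major order. The tensor values are illustrative.

import torch

x = torch.arange(8.0).reshape(1, 8)          # one output channel, in_channel = 8
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
print(x)  # tensor([[0., 4., 2., 6., 1., 5., 3., 7.]])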
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ : List[str] = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Dict = ['MobileNetV2FeatureExtractor']
__magic_name__ : Optional[int] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[Any] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 608
|
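A minimal sketch of the lazy-import idea behind `_LazyModule`, using only the standard library; the class name and demo module are my own.

import importlib

class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None
    def __getattr__(self, attr):
        if self._module is None:  # real import deferred to first attribute access
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json_mod = LazyModule("json")      # nothing imported yet
print(json_mod.dumps({"a": 1}))    # import happens here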
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase_ = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCAmelCase_ = Features({"""text""": Value("""string""" )} )
lowerCAmelCase_ = Features({"""labels""": ClassLabel} )
lowerCAmelCase_ = "text"
lowerCAmelCase_ = "labels"
def lowercase_ ( self , __UpperCamelCase ):
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __UpperCamelCase ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
def lowercase_ ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 608
| 1
|
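A quick, self-contained illustration of the `ClassLabel` feature the task template above aligns with; the label names are illustrative.

from datasets import ClassLabel

labels = ClassLabel(names=["neg", "pos"])
print(labels.str2int("pos"))  # 1
print(labels.int2str(0))      # 'neg'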
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class a__ :
pass
| 361
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
# ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 297
| 0
|
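A small sketch of the stacked `@patch` ordering used above: decorators apply bottom-up, so the innermost patch arrives as the first test argument. The patched targets here are illustrative.

from unittest.mock import patch

@patch("os.remove")          # outer -> second argument
@patch("os.rename")          # inner -> first argument
def demo(mock_rename, mock_remove):
    print(mock_rename is not mock_remove)  # True

demo()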
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = PLBartTokenizer
UpperCamelCase__ = None
UpperCamelCase__ = False
def lowerCAmelCase_ ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
a__ = PLBartTokenizer(UpperCAmelCase__ ,language_codes="base" ,keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : int ):
a__ = PLBartTokenizer(UpperCAmelCase__ ,language_codes="base" ,keep_accents=UpperCAmelCase__ )
a__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase__ ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
a__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCAmelCase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
a__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
a__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
        end = tokenizer.vocab_size
        vocab_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(vocab_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])
a__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
a__ = tokenizer(UpperCAmelCase__ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCAmelCase__ ,skip_special_tokens=UpperCAmelCase__ ,clean_up_tokenization_spaces=UpperCAmelCase__ ) ,UpperCAmelCase__ ,)
def lowerCAmelCase_ ( self : str ):
a__ = PLBartTokenizer(UpperCAmelCase__ ,language_codes="multi" ,keep_accents=UpperCAmelCase__ )
a__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase__ ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
a__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCAmelCase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
a__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
a__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
        end = tokenizer.vocab_size
        vocab_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            vocab_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"])
a__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
a__ = tokenizer(UpperCAmelCase__ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCAmelCase__ ,skip_special_tokens=UpperCAmelCase__ ,clean_up_tokenization_spaces=UpperCAmelCase__ ) ,UpperCAmelCase__ ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = '''uclanlp/plbart-python-en_XX'''
    src_text = [
        '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
        '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
    ]
    tgt_text = [
        '''Returns the maximum value of a b c.''',
        '''Sums the values of a b c.''',
    ]
    expected_src_tokens = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] ):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX")
        cls.pad_token_id = 1
return cls
def lowerCAmelCase_ ( self : int ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] ,5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] ,5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] ,5_00_03 )
def lowerCAmelCase_ ( self : List[str] ):
a__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,UpperCAmelCase__ )
def lowerCAmelCase_ ( self : List[Any] ):
self.assertIn(UpperCAmelCase__ ,self.tokenizer.all_special_ids )
a__ = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
a__ = self.tokenizer.decode(UpperCAmelCase__ ,skip_special_tokens=UpperCAmelCase__ )
a__ = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ ,UpperCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token ,UpperCAmelCase__ )
def lowerCAmelCase_ ( self : List[str] ):
a__ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] ,UpperCAmelCase__ )
a__ = 10
a__ = self.tokenizer(UpperCAmelCase__ ,max_length=UpperCAmelCase__ ,truncation=UpperCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,UpperCAmelCase__ )
self.assertEqual(len(UpperCAmelCase__ ) ,UpperCAmelCase__ )
def lowerCAmelCase_ ( self : Any ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) ,[5_00_04, 5_00_01] )
def lowerCAmelCase_ ( self : Optional[int] ):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def lowerCAmelCase_ ( self : Any ):
a__ = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=UpperCAmelCase__ ,return_tensors="pt" )
a__ = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] ,UpperCAmelCase__ )
self.assertEqual(batch.decoder_input_ids[1][-1] ,2 )
self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] )
@require_torch
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=UpperCAmelCase__ ,truncation=UpperCAmelCase__ ,max_length=len(self.expected_src_tokens ) ,return_tensors="pt" ,)
a__ = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
self.assertEqual((2, 26) ,batch.input_ids.shape )
self.assertEqual((2, 26) ,batch.attention_mask.shape )
a__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,UpperCAmelCase__ )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase_ ( self : Optional[Any] ):
a__ = self.tokenizer(self.src_text ,padding=UpperCAmelCase__ ,truncation=UpperCAmelCase__ ,max_length=3 ,return_tensors="pt" )
a__ = self.tokenizer(
text_target=self.tgt_text ,padding=UpperCAmelCase__ ,truncation=UpperCAmelCase__ ,max_length=10 ,return_tensors="pt" )
a__ = targets["input_ids"]
a__ = shift_tokens_right(UpperCAmelCase__ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
a__ = self.tokenizer._build_translation_inputs(
"A test" ,return_tensors="pt" ,src_lang="en_XX" ,tgt_lang="java" )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) ,{
# A, test, EOS, en_XX
"input_ids": [[1_50, 2_42, 2, 5_00_03]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_00_01,
} ,)
| 721
|
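A hedged, self-contained sketch of the `shift_tokens_right` idea tested above: decoder inputs are the labels rotated one position, with the final language-code token moved to the front. The token values are illustrative.

import torch

def shift_tokens_right(input_ids, pad_token_id):
    prev_tokens = input_ids.clone()
    # the language-code token sits at the end of the labels; move it to position 0
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start = input_ids.gather(1, index_of_eos).squeeze(-1)
    prev_tokens[:, 1:] = input_ids[:, :-1]
    prev_tokens[:, 0] = decoder_start
    return prev_tokens

labels = torch.tensor([[10, 11, 2, 50_003]])  # ..., EOS, language code
print(shift_tokens_right(labels, pad_token_id=1))  # tensor([[50003, 10, 11, 2]])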
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.')
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(F'Loading tokenizer classes: {tokenizer_names}')
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')
        for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}')
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(F'=> File names {file_names}')
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(F'=> removing {file_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 394
| 0
|
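A hedged usage sketch of the conversion step the script above automates: load a checkpoint with the fast tokenizer class and save only the `tokenizer.json` format. The model id is illustrative and the call requires network access.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)
tok.save_pretrained("/tmp/fast-tokenizer", legacy_format=False)  # writes tokenizer.json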
"""simple docstring"""
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data):
    return data[1:] + data[0]
def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("""Enter 10 bit key: """)
    message = input("""Enter 8 bit message: """)
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("""Cipher text is:""", CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("""Plain text after decrypting is:""", PT)
| 102
|
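A self-contained worked example of the permutation and XOR primitives the cipher above builds on; the tables and bit-strings here are illustrative, not the cipher's own.

def apply_table(inp, table):
    return "".join(inp[i - 1] for i in table)  # tables are 1-indexed

def xor(a, b):
    return "".join("0" if x == y else "1" for x, y in zip(a, b))

print(apply_table("1010", [2, 4, 3, 1]))  # '0011'
print(xor("1010", "0110"))                # '1100'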
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.vocab)
    def get_vocab(self):
        """simple docstring"""
        return dict(self.vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """simple docstring"""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 48
| 0
|
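A minimal sketch of the character-level tokenization above: each character is its own token, and unknown characters fall back to the unk id. The vocabulary is illustrative.

vocab = {"[GO]": 0, "a": 1, "b": 2, "c": 3}
unk_id = vocab["[GO]"]

def tokenize(text):
    return [vocab.get(ch, unk_id) for ch in text]

print(tokenize("abz"))  # [1, 2, 0]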
"""simple docstring"""
def text_justification(word, max_width):
    words = word.split()
    def justify(line, width, max_width) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)
    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 463
|
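A standalone sketch of the round-robin space distribution at the heart of the justifier above; the helper name and values are my own.

def distribute_spaces(total_spaces, gaps):
    counts = [total_spaces // gaps] * gaps
    for i in range(total_spaces % gaps):  # leftmost gaps get the extra spaces
        counts[i] += 1
    return counts

print(distribute_spaces(7, 3))  # [3, 2, 2]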
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ (unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 463
| 1
|
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowercase_ ( A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = FlaxAutoencoderKL
@property
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 418
|
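A minimal sketch of the JAX PRNG pattern used to build the Flax test inputs above; the shapes are illustrative.

import jax

key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)  # split before each draw for reproducibility
sample = jax.random.uniform(subkey, (4, 3, 32, 32))
print(sample.shape)  # (4, 3, 32, 32)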
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 551
| 0
|
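A hedged sketch of the post-conversion save step the flags above control: cast to fp16 (as --half does) and write safetensors (as --to_safetensors does). The pipeline id is illustrative, not from the script.

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")  # illustrative id
pipe.to(torch_dtype=torch.float16)
pipe.save_pretrained("/tmp/sd-out", safe_serialization=True)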
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :Tuple = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :str = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
    model_type = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26
| 0
|
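A self-contained illustration of the dynamic-axes mapping the ONNX config above produces; Longformer additionally exports a `global_attention_mask` input alongside the usual two.

from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [
        ("input_ids", dynamic_axis),
        ("attention_mask", dynamic_axis),
        ("global_attention_mask", dynamic_axis),
    ]
)
print(list(onnx_inputs))  # ['input_ids', 'attention_mask', 'global_attention_mask']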
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase ):
def __init__( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=13 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Union[str, Any]=224 , lowerCAmelCase_ : List[Any]=30 , lowerCAmelCase_ : Any=400 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Any=[0.5, 0.5, 0.5] , lowerCAmelCase_ : str=[0.5, 0.5, 0.5] , ) -> Dict:
UpperCAmelCase_ : int = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : Optional[int] = num_channels
UpperCAmelCase_ : Dict = image_size
UpperCAmelCase_ : Union[str, Any] = min_resolution
UpperCAmelCase_ : List[str] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Optional[int] = size
UpperCAmelCase_ : List[str] = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase ):
__magic_name__ = ViTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "size" ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4,
            out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1_000,
            norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
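# Note (illustrative): `DiTPipeline.get_label_ids` maps human-readable ImageNet
# class names such as "vase" to their integer label ids, which the class-conditional
# pipeline then consumes through the `class_labels` argument.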
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the Russian peasant (shift-and-add) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a by b modulo `modulus`, keeping every intermediate result reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
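# Illustrative usage (the function names above were chosen in this fix; the
# original file left them unnamed):
if __name__ == "__main__":
    print(binary_multiply(6, 7))  # 42
    print(binary_mod_multiply(6, 7, 5))  # 42 % 5 == 2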
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX-algorithm for the minimum vertex cover problem."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
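# Illustrative trace (hedged): on the demo graph below, the greedy pick order is
# vertex 3 (degree 4), then 2, then 0, producing the cover {0, 2, 3}.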
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n via its prime factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
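# Worked example (illustrative): 28 = 2^2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6.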
def solution():
    """Return the first triangle number having more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
def knapsack(values: list, weights: list, number_of_items: int, max_weight: int, index: int) -> int:
    """0/1 knapsack by plain recursion: either skip item `index` or take it if it fits."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(values, weights, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
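    # Illustrative demo (values/weights assumed, not from the source): capacity 50
    # with items (value, weight) = (60, 10), (100, 20), (120, 30) -> optimum 220,
    # achieved by taking the last two items.
    print(knapsack([60, 100, 120], [10, 20, 30], 3, 50, 0))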
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def _a ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def _a ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def _a ( self ):
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
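# Shape sketch (illustrative, assuming batch=2, clip_embeddings_dim=768,
# cross_attention_dim=1280, clip_extra_context_tokens=4):
#   image_embeddings (2, 768) -> extra context tokens (2, 4, 1280), and
#   text_encoder_hidden_states (2, seq, 1280) -> concatenated (2, 4 + seq, 1280).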
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main() -> None:
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--peak_memory_upper_bound''' , type=float , default=None , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
    parser.add_argument(
        '''--n_train''' , type=int , default=320 , help='''Number of training examples to use.''' , )
    parser.add_argument(
        '''--n_val''' , type=int , default=160 , help='''Number of validation examples to use.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def _lowerCAmelCase ( _lowerCAmelCase="ro" , _lowerCAmelCase="en" , _lowerCAmelCase="wmt16" , _lowerCAmelCase=None )-> None:
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
__UpperCAmelCase = F'{src_lang}-{tgt_lang}'
print(F'Converting {dataset}-{pair}' )
__UpperCAmelCase = datasets.load_dataset(_lowerCAmelCase , _lowerCAmelCase )
if save_dir is None:
__UpperCAmelCase = F'{dataset}-{pair}'
__UpperCAmelCase = Path(_lowerCAmelCase )
save_dir.mkdir(exist_ok=_lowerCAmelCase )
for split in ds.keys():
print(F'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
__UpperCAmelCase = 'val' if split == 'validation' else split
__UpperCAmelCase = save_dir.joinpath(F'{fn}.source' )
__UpperCAmelCase = save_dir.joinpath(F'{fn}.target' )
__UpperCAmelCase = src_path.open('w+' )
__UpperCAmelCase = tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__UpperCAmelCase = x['translation']
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(F'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
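# Example invocation (the script filename is illustrative; flags mirror the function defaults):
#   python download_wmt_dataset.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en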
"""simple docstring"""
def equation ( x: float ) -> float:
    """f(x) = 10 - x^2, with roots at x = +/- sqrt(10)."""
    return 10 - x * x
def bisection ( a: float , b: float ) -> float:
    """Find a root of `equation` in [a, b] by repeated halving; requires a sign change."""
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('''Wrong space!''' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
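# Bisection halves [a, b] on each pass, so with tolerance 0.01 it takes about
# ceil(log2((b - a) / 0.01)) iterations, e.g. roughly 10 for the interval [-2, 5] used below.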
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push ( hidden_sizes: int , name: str , config , save_directory: Path , push_to_hub: bool = True ):
    """Copy weights from the timm checkpoint into the HF LeViT model and optionally save it."""
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''' , pretrained=True )
            else:
                from_model = timm.create_model('''levit_128''' , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model('''levit_192''' , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model('''levit_256''' , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model('''levit_384''' , pretrained=True )
    from_model.eval()
    our_model = LevitForImageClassificationWithTeacher(config ).eval()
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys() )
    new_keys = list(our_model.state_dict().keys() )
    print(len(og_keys ) , len(new_keys ) )
    # The two state dicts share the same ordering, so weights are copied positionally.
    for i in range(len(og_keys ) ):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights )
    x = torch.randn((2, 3, 224, 224) )
    original_logits = from_model(x )
    our_logits = our_model(x ).logits
    assert torch.allclose(original_logits , our_logits ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f'''Pushed {checkpoint_name}''' )
def convert_weights_and_push ( save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    """Build the LeViT configs, then convert one named checkpoint or all of them."""
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=idalabel , label2id=labelaid )
    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
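# Example invocation (the script filename is illustrative; flags match the parser above):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/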
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig ( XLMRobertaConfig ):
    model_type = '''M-CLIP'''
    def __init__( self , transformerDimSize=1_0_2_4 , imageDimSize=7_6_8 , **kwargs ) -> None:
        """Store the text/image projection sizes before initializing the base config."""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP ( PreTrainedModel ):
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ) -> None:
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward ( self , input_ids , attention_mask ):
        """Mean-pool the token embeddings (weighted by the attention mask) and project them."""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(pooled ), embs
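# Minimal usage sketch (illustrative names; assumes a compatible XLM-R tokenizer and a loaded model):
#   batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
#   projected, token_embs = model(batch["input_ids"], batch["attention_mask"])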
def count_inversions_bf ( arr ) -> int:
    """Count inversions by checking every pair: O(n^2)."""
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive ( arr ):
    """Count inversions with a merge-sort style divide and conquer: O(n log n)."""
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a , inversion_p = count_inversions_recursive(p )
    b , inversions_q = count_inversions_recursive(q )
    c , cross_inversions = _count_cross_inversions(a , b )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions ( p , q ):
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
def main () -> None:
    arr_a = [1_0, 2, 1, 5, 5, 2, 1_1]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 8
    print("""number of inversions = """ , num_inversions_bf )
    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("""number of inversions = """ , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("""number of inversions = """ , num_inversions_bf )
if __name__ == "__main__":
main()
def factorial ( num ) -> int:
    '''Return num!.'''
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add ( number ) -> int:
    '''Sum the decimal digits of number.'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution ( num = 1_00 ) -> int:
    '''Project Euler 20: digit sum of num!.'''
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
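# Sanity check: factorial(10) == 3628800, whose digits sum to 27, so solution(10) == 27.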
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig ( PretrainedConfig ):
    model_type ="""bridgetower_vision_model"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1E-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # When given a full BridgeTower config, pull out the nested vision sub-config.
        if config_dict.get('model_type' ) == "bridgetower":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig ( PretrainedConfig ):
    model_type ="""bridgetower_text_model"""
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get('model_type' ) == "bridgetower":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig ( PretrainedConfig ):
    model_type ="""bridgetower"""
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1E-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop('text_config_dict' , None )
        _ = kwargs.pop('vision_config_dict' , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def from_text_vision_configs ( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict ( self ):
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
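# Minimal usage sketch: build a combined config from the two sub-configs defined above.
#   text_cfg = BridgeTowerTextConfig()
#   vision_cfg = BridgeTowerVisionConfig()
#   config = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)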
from __future__ import annotations
def p_series ( nth_term: int | float | str , power: int | float | str )-> list[str]:
    """Return the first nth_term terms of the P-series 1 + 1/2^p + 1/3^p + ... as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(F'''1 / {pow(temp + 1 , int(power ) )}''' if series else '1' )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
    power = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
'''simple docstring'''
def valid_coloring ( neighbours: list[int] , colored_vertices: list[int] , color: int ):
    """A vertex may take `color` only if no already-colored neighbour uses it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color ( graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ):
    """Backtracking helper: try every color for vertex `index`, recursing on success."""
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color ( graph: list[list[int]] , max_colors: int ):
    """Return a valid coloring (a list of color indices) or [] if none exists."""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
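# Example: a triangle (vertices 0-2) plus an isolated vertex needs 3 colors.
#   graph = [[0, 1, 1, 0], [1, 0, 1, 0], [1, 1, 0, 0], [0, 0, 0, 0]]
#   color(graph, 3)  # -> [0, 1, 2, 0]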
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCAmelCase_ : Union[str, Any] = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class lowercase ( unittest.TestCase , __lowerCamelCase ):
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
lowercase_ = load_tool("text-question-answering")
self.tool.setup()
lowercase_ = load_tool("text-question-answering" , remote=__lowerCAmelCase)
def __UpperCAmelCase ( self : Dict) -> int:
lowercase_ = self.tool(__lowerCAmelCase , "What did Hugging Face do in April 2021?")
self.assertEqual(__lowerCAmelCase , "launched the BigScience Research Workshop")
def __UpperCAmelCase ( self : int) -> Dict:
lowercase_ = self.remote_tool(__lowerCAmelCase , "What did Hugging Face do in April 2021?")
self.assertEqual(__lowerCAmelCase , "launched the BigScience Research Workshop")
def __UpperCAmelCase ( self : Optional[int]) -> int:
lowercase_ = self.tool(text=__lowerCAmelCase , question="What did Hugging Face do in April 2021?")
self.assertEqual(__lowerCAmelCase , "launched the BigScience Research Workshop")
def __UpperCAmelCase ( self : List[Any]) -> str:
lowercase_ = self.remote_tool(text=__lowerCAmelCase , question="What did Hugging Face do in April 2021?")
self.assertEqual(__lowerCAmelCase , "launched the BigScience Research Workshop")
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    _type: str = field(default='Image' , init=False , repr=False )
def __call__( self : List[Any]) -> List[Any]:
return self.pa_type
    def encode_example ( self , value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value , list):
            value = np.array(value)
        if isinstance(value , str):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value , PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.')
    def decode_example ( self , value: dict , token_per_repo_id=None) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path , bytes_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.')
else:
            if is_local_path(path):
                image = PIL.Image.open(path)
            else:
                source_url = path.split("::")[-1]
                try:
                    repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL)["repo_id"]
                    token = token_per_repo_id.get(repo_id)
                except ValueError:
                    token = None
                with xopen(path , "rb" , use_auth_token=token) as f:
                    bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
    def flatten ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
    def cast_storage ( self , storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
    def embed_storage ( self , storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path: str):
            with xopen(path , "rb") as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
def list_image_compression_formats ( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __a ( __lowerCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
lowercase_ = BytesIO()
if image.format in list_image_compression_formats():
lowercase_ = image.format
else:
lowercase_ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(__lowerCamelCase , format=__lowerCamelCase )
return buffer.getvalue()
def __a ( __lowerCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(__lowerCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )}
def encode_np_array ( array: np.ndarray ) -> dict:
    '''Downcast the array to a Pillow-compatible dtype if needed, then encode it.'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
        if dtype is not dest_dtype:
            warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts ( objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
    '''Dispatch on the type of the first non-null object to encode a whole column.'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
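# Minimal usage sketch (hypothetical file path; `cast_column` is the standard datasets API):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"image": ["cat.png"]}).cast_column("image", Image())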
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int ,__A : Optional[int] ,__A : Optional[Any] = 3 ,__A : List[Any] = 1 ,__A : List[Any] = 1 ,__A : Dict = "relu" ,**__A : Optional[int] ,) -> Union[str, Any]:
super().__init__(**a__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_lowercase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_lowercase = tf.keras.layers.ConvaD(
filters=a__ ,kernel_size=a__ ,strides=a__ ,padding='VALID' ,groups=a__ ,use_bias=a__ ,name='convolution' ,)
_lowercase = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='normalization' )
_lowercase = ACTaFN[activation] if activation is not None else tf.identity
def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[Any] ) -> Union[str, Any]:
_lowercase = self.convolution(self.padding(a__ ) )
_lowercase = self.normalization(a__ )
_lowercase = self.activation(a__ )
return hidden_state
class TFRegNetEmbeddings ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str ,__A : Optional[int] ,**__A : Tuple ) -> Dict:
super().__init__(**a__ )
_lowercase = config.num_channels
_lowercase = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='embedder' ,)
def __UpperCAmelCase ( self : str ,__A : int ) -> List[Any]:
_lowercase = shape_list(a__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_lowercase = tf.transpose(a__ ,perm=(0, 2, 3, 1) )
_lowercase = self.embedder(a__ )
return hidden_state
class TFRegNetShortCut ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,__A : Optional[int] ,__A : int = 2 ,**__A : List[Any] ) -> Optional[Any]:
super().__init__(**a__ )
_lowercase = tf.keras.layers.ConvaD(
filters=a__ ,kernel_size=1 ,strides=a__ ,use_bias=a__ ,name='convolution' )
_lowercase = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='normalization' )
def __UpperCAmelCase ( self : str ,__A : Optional[Any] ,__A : Dict = False ) -> tf.Tensor:
return self.normalization(self.convolution(a__ ) ,training=a__ )
class TFRegNetSELayer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,__A : Optional[Any] ,__A : List[Any] ,**__A : Tuple ) -> Tuple:
super().__init__(**a__ )
_lowercase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a__ ,name='pooler' )
_lowercase = [
tf.keras.layers.ConvaD(filters=a__ ,kernel_size=1 ,activation='relu' ,name='attention.0' ),
tf.keras.layers.ConvaD(filters=a__ ,kernel_size=1 ,activation='sigmoid' ,name='attention.2' ),
]
def __UpperCAmelCase ( self : Tuple ,__A : str ) -> Optional[Any]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_lowercase = self.pooler(a__ )
for layer_module in self.attention:
_lowercase = layer_module(a__ )
_lowercase = hidden_state * pooled
return hidden_state
class TFRegNetXLayer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Tuple ,__A : Optional[int] ,__A : int ,__A : Union[str, Any] ,__A : Union[str, Any] = 1 ,**__A : Dict ) -> List[Any]:
super().__init__(**a__ )
_lowercase = in_channels != out_channels or stride != 1
_lowercase = max(1 ,out_channels // config.groups_width )
_lowercase = (
TFRegNetShortCut(a__ ,stride=a__ ,name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' ,name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_lowercase = [
TFRegNetConvLayer(a__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0' ),
TFRegNetConvLayer(
a__ ,stride=a__ ,groups=a__ ,activation=config.hidden_act ,name='layer.1' ),
TFRegNetConvLayer(a__ ,kernel_size=1 ,activation=a__ ,name='layer.2' ),
]
_lowercase = ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self : int ,__A : Tuple ) -> Dict:
_lowercase = hidden_state
for layer_module in self.layers:
_lowercase = layer_module(a__ )
_lowercase = self.shortcut(a__ )
hidden_state += residual
_lowercase = self.activation(a__ )
return hidden_state
class TFRegNetYLayer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int ,__A : Optional[Any] ,__A : Union[str, Any] ,__A : List[Any] ,__A : Dict = 1 ,**__A : Dict ) -> Tuple:
super().__init__(**a__ )
_lowercase = in_channels != out_channels or stride != 1
_lowercase = max(1 ,out_channels // config.groups_width )
_lowercase = (
TFRegNetShortCut(a__ ,stride=a__ ,name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' ,name='shortcut' )
)
_lowercase = [
TFRegNetConvLayer(a__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0' ),
TFRegNetConvLayer(
a__ ,stride=a__ ,groups=a__ ,activation=config.hidden_act ,name='layer.1' ),
TFRegNetSELayer(a__ ,reduced_channels=int(round(in_channels / 4 ) ) ,name='layer.2' ),
TFRegNetConvLayer(a__ ,kernel_size=1 ,activation=a__ ,name='layer.3' ),
]
_lowercase = ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self : List[str] ,__A : str ) -> List[str]:
_lowercase = hidden_state
for layer_module in self.layers:
_lowercase = layer_module(a__ )
_lowercase = self.shortcut(a__ )
hidden_state += residual
_lowercase = self.activation(a__ )
return hidden_state
class TFRegNetStage ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,__A : Optional[int] ,__A : Any ,__A : Dict ,__A : Optional[Any] = 2 ,__A : Dict = 2 ,**__A : int ) -> Union[str, Any]:
super().__init__(**a__ )
_lowercase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
_lowercase = [
# downsampling is done in the first layer with stride of 2
layer(a__ ,a__ ,a__ ,stride=a__ ,name='layers.0' ),
*[layer(a__ ,a__ ,a__ ,name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __UpperCAmelCase ( self : List[str] ,__A : Optional[Any] ) -> Dict:
for layer_module in self.layers:
_lowercase = layer_module(a__ )
return hidden_state
class TFRegNetEncoder ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Tuple ,__A : int ,**__A : Optional[Any] ) -> Dict:
super().__init__(**a__ )
_lowercase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
a__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='stages.0' ,) )
_lowercase = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(a__ ,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(a__ ,a__ ,a__ ,depth=a__ ,name=F"""stages.{i+1}""" ) )
def __UpperCAmelCase ( self : Dict ,__A : Tuple ,__A : int = False ,__A : Union[str, Any] = True ) -> TFBaseModelOutputWithNoAttention:
_lowercase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowercase = hidden_states + (hidden_state,)
_lowercase = stage_module(a__ )
if output_hidden_states:
_lowercase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=a__ ,hidden_states=a__ )
@keras_serializable
class TFRegNetMainLayer ( tf.keras.layers.Layer ):
"""simple docstring"""
    config_class = RegNetConfig
def __init__( self : int ,__A : int ,**__A : str ) -> str:
super().__init__(**a__ )
_lowercase = config
_lowercase = TFRegNetEmbeddings(a__ ,name='embedder' )
_lowercase = TFRegNetEncoder(a__ ,name='encoder' )
_lowercase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a__ ,name='pooler' )
@unpack_inputs
def __UpperCAmelCase ( self : str ,__A : Optional[int] ,__A : Tuple = None ,__A : str = None ,__A : Union[str, Any] = False ,) -> TFBaseModelOutputWithPoolingAndNoAttention:
_lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase = return_dict if return_dict is not None else self.config.use_return_dict
_lowercase = self.embedder(a__ ,training=a__ )
_lowercase = self.encoder(
a__ ,output_hidden_states=a__ ,return_dict=a__ ,training=a__ )
_lowercase = encoder_outputs[0]
_lowercase = self.pooler(a__ )
# Change to NCHW output format have uniformity in the modules
_lowercase = tf.transpose(a__ ,perm=(0, 3, 1, 2) )
_lowercase = tf.transpose(a__ ,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_lowercase = tuple([tf.transpose(a__ ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a__ ,pooler_output=a__ ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class TFRegNetPreTrainedModel ( TFPreTrainedModel ):
    """An abstract class that handles weights initialization and pretrained checkpoint loading."""
    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''
@property
    def input_signature ( self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) ,dtype=tf.floataa )}
snake_case = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
snake_case = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , REGNET_START_DOCSTRING , )
class TFRegNetModel ( TFRegNetPreTrainedModel ):
"""simple docstring"""
def __init__( self : List[str] ,__A : Union[str, Any] ,*__A : List[Any] ,**__A : List[str] ) -> Optional[Any]:
super().__init__(a__ ,*a__ ,**a__ )
_lowercase = TFRegNetMainLayer(a__ ,name='regnet' )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=TFBaseModelOutputWithPoolingAndNoAttention ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def __UpperCAmelCase ( self : Optional[int] ,__A : Tuple ,__A : Union[str, Any] = None ,__A : Dict = None ,__A : int=False ,) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
_lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase = return_dict if return_dict is not None else self.config.use_return_dict
_lowercase = self.regnet(
pixel_values=a__ ,output_hidden_states=a__ ,return_dict=a__ ,training=a__ ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
    '''\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ''' , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification ( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
"""simple docstring"""
def __init__( self : Dict ,__A : Dict ,*__A : List[Any] ,**__A : List[str] ) -> str:
super().__init__(a__ ,*a__ ,**a__ )
_lowercase = config.num_labels
_lowercase = TFRegNetMainLayer(a__ ,name='regnet' )
# classification head
_lowercase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=TFSequenceClassifierOutput ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def __UpperCAmelCase ( self : List[Any] ,__A : Tuple = None ,__A : str = None ,__A : Optional[int] = None ,__A : str = None ,__A : List[str]=False ,) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
_lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase = return_dict if return_dict is not None else self.config.use_return_dict
_lowercase = self.regnet(
a__ ,output_hidden_states=a__ ,return_dict=a__ ,training=a__ )
_lowercase = outputs.pooler_output if return_dict else outputs[1]
_lowercase = self.classifier[0](a__ )
_lowercase = self.classifier[1](a__ )
_lowercase = None if labels is None else self.hf_compute_loss(labels=a__ ,logits=a__ )
if not return_dict:
_lowercase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=a__ ,logits=a__ ,hidden_states=outputs.hidden_states )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowercase : Optional[int] = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass ( cls ) -> None:
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass ( cls ) -> None:
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def get_dummy_components ( self ) -> dict:
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs ( self , device , seed=0 ) -> dict:
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
    def test_inference ( self ) -> None:
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        expected_slice = np.array(
            [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_cpu_offload_forward_pass ( self ) -> None:
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
    def test_inference_batch_consistent ( self ) -> None:
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical ( self ) -> None:
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
    def test_dict_tuple_outputs_equivalent ( self ) -> None:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_pt_np_pil_outputs_equivalent ( self ) -> None:
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
    def test_save_load_local ( self ) -> None:
        super().test_save_load_local(expected_max_difference=5e-4 )
    def test_save_load_optional_components ( self ) -> None:
        super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests ( unittest.TestCase ):
    """simple docstring"""
    @classmethod
    def setUpClass ( cls ) -> None:
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass ( cls ) -> None:
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def tearDown ( self ) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16 ( self ) -> None:
        generator = torch.manual_seed(51 )
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.to("""cuda""" )
        prompt = """a painting of an elephant with glasses"""
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt , token_indices=token_indices , guidance_scale=7.5 , generator=generator , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
        assert np.abs((expected_image - image).max() ) < 5e-1
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict ( TypedDict ):
    '''Result of a Burrows-Wheeler transform: the transformed string and the original row index.'''
    bwt_string: str
    idx_original_string: int
def all_rotations ( s: str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform ( s: str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt ( bwt_string: str , idx_original_string: int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
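# Note: reverse_bwt rebuilds the whole rotation table, costing O(n^2 log n) time and O(n^2)
# space; a linear-time reconstruction via the LF-mapping exists but is not shown here.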
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
"""simple docstring"""
def naive_pattern_search ( s: str , pattern: str ) -> list:
    '''Return every index where `pattern` occurs in `s` (brute force, O(len(s) * len(pattern))).'''
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
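# Expected output for the sample string above: [4, 10, 18]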
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    batch_params = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size ( self ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim ( self ):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_a ( self ):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim ( self ):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim ( self ):
        """simple docstring"""
        return 100
@property
    def dummy_unet ( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase = UNetaDConditionModel(**_lowercase )
return model
@property
def _lowercase ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.dummy_unet
_lowerCAmelCase = self.dummy_movq
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase = DDIMScheduler(**_lowercase )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _lowercase ( self , _lowercase , _lowercase=0 ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase ) ).to(_lowercase )
_lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowercase )
# create init_image
_lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
_lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
if str(_lowercase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_lowercase )
else:
_lowerCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
_lowerCAmelCase = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
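    # Note on the inputs built above (explanatory comment, not from the original
    # test): in img2img pipelines `strength` sets the fraction of the diffusion
    # schedule applied to the noised init image, so 0.2 keeps the result close
    # to `init_image`, while `hint` carries the ControlNet conditioning (e.g. a
    # depth map) at the same spatial size as the image.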
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowercase )
_lowerCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
_lowerCAmelCase = pipe(**self.get_dummy_inputs(_lowercase ) )
_lowerCAmelCase = output.images
_lowerCAmelCase = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
_lowerCAmelCase = image[0, -3:, -3:, -1]
_lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase = init_image.resize((512, 512) )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
_lowerCAmelCase = torch.from_numpy(np.array(_lowercase ) ).float() / 255.0
_lowerCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_lowerCAmelCase = """A robot, 4k photo"""
_lowerCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
_lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
_lowerCAmelCase = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
_lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase = pipe_prior(
_lowercase , image=_lowercase , strength=0.85 , generator=_lowercase , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase = pipeline(
image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , hint=_lowercase , generator=_lowercase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , )
_lowerCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 5
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Any =get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : int = GPTSwaTokenizer
lowercase : Union[str, Any] = False
lowercase : Dict = True
lowercase : int = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
A : Dict =GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
A : Union[str, Any] ='This is a test'
A : str ='This is a test'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Tuple:
A : int ='<s>'
A : Optional[Any] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
A : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 20_00 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
A : Union[str, Any] =GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ )
A : str =tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
A : Dict =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
A : int =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
A : List[str] =tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
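    # Byte fallback explained (comment added for clarity): 'é' is U+00E9, which
    # the SentencePiece model cannot match as a whole piece here, so it falls
    # back to the raw UTF-8 bytes <0xC3> <0xA9>; the '9' in '92000' is likewise
    # emitted as the byte token <0x39>.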
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[str]:
A : Dict =GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ )
A : Tuple =['This is a test', 'I was born in 92000, and this is falsé.']
A : int =[
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertListEqual(tokenizer.encode_fast(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(tokenizer.decode_fast(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
A : Optional[int] =[
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
A : Any ={'input_ids': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=SCREAMING_SNAKE_CASE__ , )
| 305
| 0
|
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)
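# A minimal sketch of why the tuple trick works (illustrative, not from the
# original source): (0, 1).count(1) == 1, so the comparison `!= 0` is True and
# int(True) == 1; only (0, 0) contains no 1s, giving int(False) == 0 -- exactly
# the OR truth table.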
def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 710
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class _UpperCamelCase( SCREAMING_SNAKE_CASE ):
__A: Tuple = """funnel"""
__A: Dict = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : Tuple , _lowerCamelCase : Optional[Any]=3_05_22 , _lowerCamelCase : Any=[4, 4, 4] , _lowerCamelCase : Dict=None , _lowerCamelCase : List[str]=2 , _lowerCamelCase : int=7_68 , _lowerCamelCase : Optional[Any]=12 , _lowerCamelCase : Any=64 , _lowerCamelCase : Union[str, Any]=30_72 , _lowerCamelCase : Optional[Any]="gelu_new" , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : str=0.1 , _lowerCamelCase : Any=None , _lowerCamelCase : Any=1E-9 , _lowerCamelCase : str="mean" , _lowerCamelCase : str="relative_shift" , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Dict=True , _lowerCamelCase : int=True , **_lowerCamelCase : Union[str, Any] , ):
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Optional[Any] = block_sizes
_UpperCAmelCase : str = [1] * len(_lowerCamelCase ) if block_repeats is None else block_repeats
assert len(_lowerCamelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_UpperCAmelCase : List[str] = num_decoder_layers
_UpperCAmelCase : str = d_model
_UpperCAmelCase : int = n_head
_UpperCAmelCase : str = d_head
_UpperCAmelCase : List[Any] = d_inner
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : Any = hidden_dropout
_UpperCAmelCase : Union[str, Any] = attention_dropout
_UpperCAmelCase : int = activation_dropout
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : List[str] = initializer_std
_UpperCAmelCase : List[str] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_UpperCAmelCase : Union[str, Any] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_UpperCAmelCase : str = attention_type
_UpperCAmelCase : Union[str, Any] = separate_cls
_UpperCAmelCase : List[str] = truncate_seq
_UpperCAmelCase : Optional[int] = pool_q_only
super().__init__(**_lowerCamelCase )
@property
def a__ ( self : Dict ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def a__ ( self : List[Any] , _lowerCamelCase : Any ):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def a__ ( self : Optional[int] ):
return len(self.block_sizes )
@num_blocks.setter
def a__ ( self : List[str] , _lowerCamelCase : Any ):
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
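    # Reading the block parameters (explanatory note, not from the original
    # config): with the default block_sizes=[4, 4, 4], num_hidden_layers
    # resolves to sum([4, 4, 4]) == 12 encoder layers split across
    # num_blocks == 3 blocks, which is why both properties above are derived
    # from block_sizes instead of being settable directly.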
| 328
| 0
|
"""simple docstring"""
import argparse
import os
import re
lowerCamelCase__ : Union[str, Any] = "src/transformers"
# Pattern that looks at the indentation in a line.
lowerCamelCase__ : Tuple = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCamelCase__ : Union[str, Any] = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCamelCase__ : Optional[Any] = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCamelCase__ : str = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCamelCase__ : List[str] = re.compile(r"\[([^\]]+)\]")
def __A ( a_ : Tuple )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = _re_indent.search(a_ )
return "" if search is None else search.groups()[0]
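# Example of the helper above (added for illustration):
# get_indent('    "tokenization_bert": ...') returns '    ' (four spaces),
# while get_indent('') returns '' because the regex finds no non-space char.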
def __A ( a_ : Union[str, Any] , a_ : Dict="" , a_ : Dict=None , a_ : List[Any]=None )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : Any = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(a_ ):
index += 1
SCREAMING_SNAKE_CASE : int = ['''\n'''.join(lines[:index] )]
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
SCREAMING_SNAKE_CASE : List[str] = [lines[index]]
index += 1
while index < len(a_ ) and (end_prompt is None or not lines[index].startswith(a_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(a_ ) )
if index < len(a_ ) - 1:
SCREAMING_SNAKE_CASE : List[Any] = [lines[index + 1]]
index += 1
else:
SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
blocks.append('''\n'''.join(a_ ) )
SCREAMING_SNAKE_CASE : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a_ ) > 0:
blocks.append('''\n'''.join(a_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def __A ( a_ : Any )-> Optional[Any]:
'''simple docstring'''
def _inner(a_ : List[str] ):
return key(a_ ).lower().replace('''_''' , '''''' )
return _inner
def __A ( a_ : Union[str, Any] , a_ : Optional[int]=None )-> Optional[int]:
'''simple docstring'''
    def noop(x):
        return x
if key is None:
SCREAMING_SNAKE_CASE : Any = noop
# Constants are all uppercase, they go first.
SCREAMING_SNAKE_CASE : Tuple = [obj for obj in objects if key(a_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
SCREAMING_SNAKE_CASE : Union[str, Any] = [obj for obj in objects if key(a_ )[0].isupper() and not key(a_ ).isupper()]
# Functions begin with a lowercase, they go last.
SCREAMING_SNAKE_CASE : Optional[Any] = [obj for obj in objects if not key(a_ )[0].isupper()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ignore_underscore(a_ )
return sorted(a_ , key=a_ ) + sorted(a_ , key=a_ ) + sorted(a_ , key=a_ )
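# Quick illustration of the ordering (not in the original file):
# sort_objects(["load_tf_weights", "BertModel", "BERT_PRETRAINED_MODEL_ARCHIVE_LIST"])
# yields ["BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertModel", "load_tf_weights"]:
# uppercase constants first, CamelCase classes second, lowercase functions
# last, each group sorted alphabetically with underscores ignored.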
def __A ( a_ : Any )-> Optional[Any]:
'''simple docstring'''
def _replace(a_ : Tuple ):
SCREAMING_SNAKE_CASE : int = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
SCREAMING_SNAKE_CASE : Dict = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
SCREAMING_SNAKE_CASE : List[str] = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(a_ )] ) + "]"
SCREAMING_SNAKE_CASE : Union[str, Any] = import_statement.split('''\n''' )
if len(a_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
SCREAMING_SNAKE_CASE : List[str] = 2 if lines[1].strip() == '''[''' else 1
SCREAMING_SNAKE_CASE : Union[str, Any] = [(i, _re_strip_line.search(a_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
SCREAMING_SNAKE_CASE : Optional[Any] = sort_objects(a_ , key=lambda a_ : x[1] )
SCREAMING_SNAKE_CASE : str = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(a_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
SCREAMING_SNAKE_CASE : Any = _re_bracket_content.sub(_replace , lines[1] )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = keys[:-1]
SCREAMING_SNAKE_CASE : str = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(a_ )] )
return "\n".join(a_ )
else:
# Finally we have to deal with imports fitting on one line
SCREAMING_SNAKE_CASE : int = _re_bracket_content.sub(_replace , a_ )
return import_statement
def __A ( a_ : List[Any] , a_ : Tuple=True )-> Optional[int]:
'''simple docstring'''
with open(a_ , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
SCREAMING_SNAKE_CASE : List[str] = split_code_in_indented_blocks(
a_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(a_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
SCREAMING_SNAKE_CASE : List[Any] = main_blocks[block_idx]
SCREAMING_SNAKE_CASE : str = block.split('''\n''' )
# Get to the start of the imports.
SCREAMING_SNAKE_CASE : List[str] = 0
while line_idx < len(a_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
SCREAMING_SNAKE_CASE : Dict = len(a_ )
else:
line_idx += 1
if line_idx >= len(a_ ):
continue
# Ignore beginning and last line: they don't contain anything.
SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(block_lines[line_idx:-1] )
SCREAMING_SNAKE_CASE : Optional[int] = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
SCREAMING_SNAKE_CASE : Union[str, Any] = split_code_in_indented_blocks(a_ , indent_level=a_ )
# We have two categories of import key: list or _import_structure[key].append/extend
SCREAMING_SNAKE_CASE : List[str] = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
SCREAMING_SNAKE_CASE : List[str] = [(pattern.search(a_ ).groups()[0] if pattern.search(a_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
SCREAMING_SNAKE_CASE : Dict = [(i, key) for i, key in enumerate(a_ ) if key is not None]
SCREAMING_SNAKE_CASE : List[str] = [x[0] for x in sorted(a_ , key=lambda a_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : List[str] = []
for i in range(len(a_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(a_ )
count += 1
# And we put our main block back together with its first and last line.
SCREAMING_SNAKE_CASE : int = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(a_ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(a_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(a_ ) )
def __A ( a_ : Optional[int]=True )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
for root, _, files in os.walk(a_ ):
if "__init__.py" in files:
SCREAMING_SNAKE_CASE : Optional[Any] = sort_imports(os.path.join(a_ , '''__init__.py''' ) , check_only=a_ )
if result:
SCREAMING_SNAKE_CASE : List[str] = [os.path.join(a_ , '''__init__.py''' )]
if len(a_ ) > 0:
raise ValueError(F"Would overwrite {len(a_ )} files, run `make style`." )
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowerCamelCase__ : Tuple = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 698
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : str = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Optional[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowerCamelCase__ : int = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRContextEncoderTokenizer
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRQuestionEncoderTokenizer
lowerCamelCase__ : Union[str, Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowerCamelCase__ : int = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase )
class lowercase__:
'''simple docstring'''
def __call__( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = None , **lowerCamelCase_ :Tuple , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE : List[str] = titles if texts is None else texts
return super().__call__(
lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = titles if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [titles]
SCREAMING_SNAKE_CASE : Dict = texts if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [texts]
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = questions if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [questions] * n_passages
        assert len(lowerCamelCase_ ) == len(
            lowerCamelCase_ ), f"There should be as many titles as texts but got {len(lowerCamelCase_ )} titles and {len(lowerCamelCase_ )} texts."
SCREAMING_SNAKE_CASE : Any = super().__call__(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : Dict = super().__call__(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase_ , lowerCamelCase_ )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE : int = attention_mask
return self.pad(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :BatchEncoding , lowerCamelCase_ :DPRReaderOutput , lowerCamelCase_ :int = 16 , lowerCamelCase_ :int = 64 , lowerCamelCase_ :int = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = reader_output[:3]
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(range(lowerCamelCase_ ) , reverse=lowerCamelCase_ , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase_ , top_spans=lowerCamelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase_ , start_index=lowerCamelCase_ , end_index=lowerCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCamelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
for start_index, start_score in enumerate(lowerCamelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE : Dict = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] , reverse=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
SCREAMING_SNAKE_CASE : Optional[int] = end_index - start_index + 1
assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase_ ) == top_spans:
break
return chosen_span_intervals
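    # Span selection recap (explanatory comment added for clarity): candidate
    # spans are ranked by start_logit + end_logit and capped at
    # max_answer_length tokens, and any candidate that fully contains -- or is
    # fully contained in -- an already chosen span is skipped, so no kept span
    # nests inside another.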
@add_end_docstrings(_UpperCAmelCase )
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ["""input_ids""", """attention_mask"""]
UpperCamelCase = DPRReaderTokenizer
| 698
| 1
|
"""simple docstring"""
def rank_of_matrix(matrix):
    """simple docstring"""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(columns):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
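# Worked example (computed by hand, not part of the original module): for
# [[1.0, 2.0], [2.0, 4.0]] the elimination step zeroes the second row to
# [0.0, 0.0], the zero diagonal entry then triggers the `reduce` branch, and
# the function returns rank 1, matching the single linearly independent row.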
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"
def quote_of_the_day():
    """simple docstring"""
    return requests.get(API_ENDPOINT_URL + "/today").json()
def random_quotes():
    """simple docstring"""
    return requests.get(API_ENDPOINT_URL + "/random").json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
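# Response shape (an assumption based on the public ZenQuotes API, not
# verified here): both endpoints return a JSON list of objects with 'q'
# (quote), 'a' (author) and 'h' (pre-rendered HTML) keys, which is what the
# pprint call above displays.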
| 674
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=1_3 , _A=3 , _A=2_2_4 , _A=3_0 , _A=4_0_0 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCamelCase : int = size if size is not None else {"""height""": 1_8, """width""": 1_8}
UpperCamelCase : int = parent
UpperCamelCase : Any = batch_size
UpperCamelCase : List[str] = num_channels
UpperCamelCase : List[str] = image_size
UpperCamelCase : Optional[int] = min_resolution
UpperCamelCase : List[str] = max_resolution
UpperCamelCase : List[Any] = do_resize
UpperCamelCase : int = size
UpperCamelCase : Optional[Any] = do_normalize
UpperCamelCase : int = image_mean
UpperCamelCase : int = image_std
def _a ( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : str = ViTImageProcessor if is_vision_available() else None
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = EfficientFormerImageProcessorTester(self )
@property
def _a ( self ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , """image_mean""" ) )
self.assertTrue(hasattr(_A , """image_std""" ) )
self.assertTrue(hasattr(_A , """do_normalize""" ) )
self.assertTrue(hasattr(_A , """do_resize""" ) )
self.assertTrue(hasattr(_A , """size""" ) )
def _a ( self ):
'''simple docstring'''
pass
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCamelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase : Union[str, Any] = image_processor(_A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCamelCase : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase : Tuple = image_processor(_A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCamelCase : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase : Any = image_processor(_A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 102
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ : Tuple = 'xmod'
def __init__( self : Optional[Any] , lowerCamelCase__ : Optional[int]=30_522 , lowerCamelCase__ : Union[str, Any]=768 , lowerCamelCase__ : List[Any]=12 , lowerCamelCase__ : Any=12 , lowerCamelCase__ : Optional[Any]=3_072 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Optional[Any]=0.1 , lowerCamelCase__ : List[str]=512 , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : List[str]=0.0_2 , lowerCamelCase__ : Dict=1e-1_2 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : List[str]="absolute" , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Tuple=("en_XX",) , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Optional[Any] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = classifier_dropout
__lowercase = pre_norm
__lowercase = adapter_reduction_factor
__lowercase = adapter_layer_norm
__lowercase = adapter_reuse_layer_norm
__lowercase = ln_before_adapter
__lowercase = list(lowerCamelCase__ )
__lowercase = default_language
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 332
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,*_snake_case ,**_snake_case ):
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
| 700
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""DeiTFeatureExtractor"""]
_lowerCamelCase = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
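# Pattern note (explanatory comment, not from the original file): _LazyModule
# replaces this module's entry in sys.modules with a proxy that resolves the
# names listed in _import_structure only on first attribute access, so
# importing the package stays cheap when torch, TF or vision extras are
# missing; the TYPE_CHECKING branch above gives static analyzers the real
# imports.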
| 323
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , _snake_case : Optional[int] , _snake_case : Any=7 , _snake_case : int=3 , _snake_case : Tuple=18 , _snake_case : Tuple=30 , _snake_case : Tuple=4_00 , _snake_case : List[str]=True , _snake_case : str=None , _snake_case : int=True , _snake_case : Tuple=None , _snake_case : Tuple=True , _snake_case : Optional[int]=[0.5, 0.5, 0.5] , _snake_case : Any=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
A__ = size if size is not None else {'shortest_edge': 18}
A__ = crop_size if crop_size is not None else {'height': 18, 'width': 18}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_center_crop
A__ = crop_size
A__ = do_normalize
A__ = image_mean
A__ = image_std
def _a ( self : Any ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
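# --- Added usage sketch (hedged; not part of the original test file, and the
# processor kwargs below mirror the tester defaults rather than a Hub
# checkpoint). It shows the resize -> center-crop pipeline that the shape
# assertions above encode:
#
# from PIL import Image
# import numpy as np
# from transformers import LevitImageProcessor
#
# processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
# pixel_values = processor(Image.fromarray(np.zeros((32, 48, 3), dtype=np.uint8)), return_tensors="pt").pixel_values
# assert pixel_values.shape == (1, 3, 18, 18)  # (batch, channels, crop height, crop width)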
| 9
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
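# --- Added illustration (hedged sketch, not part of the conversion script):
# the remapping above relies on both models enumerating their parameters in
# the same order, so zip() over the two key lists yields an old-name ->
# new-name mapping. Toy version with two identically shaped modules:
#
# import torch.nn as nn
# src, dst = nn.Linear(4, 4), nn.Linear(4, 4)
# remap = dict(zip(src.state_dict().keys(), dst.state_dict().keys()))
# dst.load_state_dict({remap[k]: v for k, v in src.state_dict().items()})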
| 9
| 1
|
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
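# --- Added usage check (hedged illustration, not in the original script):
# right-padding two ragged label rows to a fixed length of 4 with the
# padding_tensor helper defined above.
assert padding_tensor([[1, 2], [3]], -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]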
| 700
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.metric_id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(mock_emitted_deprecation_warnings, mock_hfh, func, args, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 65
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Test all possible segments."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
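# --- Added worked example (hedged illustration): the iterative tree stores the
# leaves at st[N .. 2N-1] and the internal nodes at st[1 .. N-1], with st[1]
# the root, which is why query/update shift indices by N.
demo_tree = SegmentTree([5, 3, 8, 6], min)
assert demo_tree.query(0, 3) == 3  # min over the whole array
demo_tree.update(1, 9)             # array becomes [5, 9, 8, 6]
assert demo_tree.query(0, 3) == 5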
| 139
|
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
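# --- Added worked example (hedged illustration): 13 = 2**2 + 3**2 is the only
# way to write 13 as a sum of distinct natural-number squares, so:
assert solve(13, 2) == 1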
| 139
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
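# --- Added illustration (hedged sketch, not part of the conversion script):
# how a fused attention in_proj matrix splits into q/k/v chunks, mirroring
# read_in_q_k_v above with a toy hidden size of 6 instead of 256.
#
# import torch
# hidden = 6
# in_proj_weight = torch.randn(3 * hidden, hidden)
# q = in_proj_weight[:hidden, :]
# k = in_proj_weight[hidden : 2 * hidden, :]
# v = in_proj_weight[-hidden:, :]
# assert q.shape == k.shape == v.shape == (hidden, hidden)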
| 715
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
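# --- Added usage sketch (hedged; the checkpoint name is an assumption, not
# part of this file): passing `text=` in the same __call__ replaces the
# deprecated as_target_processor() context manager above.
#
# processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
# batch = processor(audio=raw_audio, sampling_rate=16_000, text="HELLO WORLD")
# # batch["input_values"] holds the audio features, batch["labels"] the token ids.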
| 415
| 0
|
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
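# --- Added note (hedged illustration): DummyObject turns these placeholders
# into classes whose instantiation raises an ImportError via requires_backends
# when the optional backend is missing, e.g.:
#
# ASTFeatureExtractor()  # raises ImportError explaining the missing "speech" backend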
| 318
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.Array) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)

        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
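# --- Added usage sketch (hedged illustration, not part of the module):
# initializing randomly-initialized ControlNet parameters with init_weights.
#
# import jax
# controlnet = FlaxControlNetModel(sample_size=32)
# params = controlnet.init_weights(jax.random.PRNGKey(0))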
| 318
| 1
|
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!."""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
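# --- Added worked check (hedged illustration): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
assert solution(10) == 27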
| 697
|
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
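# --- Added post-processing sketch (hedged; the target size is an arbitrary
# example, not part of the test): upsampling the (1, 384, 384) predicted depth
# map back to an input-sized image with bicubic interpolation.
#
# import torch
# depth = torch.nn.functional.interpolate(
#     predicted_depth.unsqueeze(1), size=(480, 640), mode="bicubic", align_corners=False
# ).squeeze(1)
# assert depth.shape == (1, 480, 640)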
| 697
| 1
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 382
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
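# --- Added note (hedged illustration): the _LazyModule pattern above defers
# the heavy torch import until an attribute is first accessed, so importing
# the package stays cheap. Sketch of the lookup flow:
#
# module = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# module.TableTransformerConfig  # triggers import of .configuration_table_transformer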
| 382
| 1
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class __UpperCamelCase :
_lowercase : str = None
_lowercase : bool = True
_lowercase : bool = False
_lowercase : bool = False
_lowercase : bool = False
_lowercase : float = 0
_lowercase : bool = True
_lowercase : bool = False
_lowercase : int = 1_2_8
_lowercase : "TrunkConfig" = None
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
if self.trunk is None:
__magic_name__ = TrunkConfig()
elif isinstance(self.trunk , __UpperCamelCase ):
__magic_name__ = TrunkConfig(**self.trunk )
def _SCREAMING_SNAKE_CASE ( self: List[str] ):
'''simple docstring'''
__magic_name__ = asdict(self )
__magic_name__ = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
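

# Added usage sketch (not part of the upstream module): it exercises the nested
# config defaults defined above; `vocab_size=33` is an arbitrary illustrative value.
if __name__ == "__main__":
    config = EsmConfig(vocab_size=33, is_folding_model=True)
    # EsmFoldConfig / TrunkConfig / StructureModuleConfig are filled with defaults.
    assert config.esmfold_config.trunk.sequence_state_dim == 1024
    print(config.to_dict()["esmfold_config"]["trunk"]["structure_module"]["sequence_dim"])  # 384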
| 706
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
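

# Added note: `loss` above is the mean per-token cross-entropy, so
# `-(labels.shape[-1] * loss.item())` rescales it to the negated total
# cross-entropy of the target sequence; EXPECTED_SCORE is that total as
# produced by the reference implementation the test compares against.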
| 184
| 0
|
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
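

# Added usage sketch (not in the original file); the ciphertext below is the
# standard example for this routine and should resolve to shift 7.
if __name__ == "__main__":
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared(
        "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
    )
    print(shift, decoded)  # 7 'why is the caesar cipher so popular? it is too easy to crack!'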
| 5
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
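

# Added sketch (not part of the upstream test file): the registration pattern
# exercised above, written as user code with the same cleanup the tests use.
def _registration_demo():
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    try:
        # Checkpoints whose config is a CustomConfig now resolve through the auto-API.
        assert IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] is CustomImageProcessor
    finally:
        del CONFIG_MAPPING._extra_content["custom"]
        del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]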
| 564
| 0
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
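
# Added note (not in the original script): the flip is pure arithmetic on the
# normalized YOLO box [class, x_center, y_center, width, height]; a horizontal
# flip only moves the x-centre:
#     bbox = [0, 0.25, 0.5, 0.1, 0.2]
#     flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]  # [0, 0.75, 0.5, 0.1, 0.2]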
| 663
|
"""simple docstring"""
def perfect(number: int) -> bool:
    """Check whether `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input("Enter number: ").strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
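
# Added examples (not in the original file):
# >>> [n for n in range(2, 500) if perfect(n)]
# [6, 28, 496]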
| 663
| 1
|
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all contiguous character n-grams of size `ngram_size` from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
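
# Added usage example (not in the original file):
# >>> create_ngram("ngram", 2)
# ['ng', 'gr', 'ra', 'am']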
| 50
|
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the card number starts with a valid issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return whether the given credit card number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 294
| 0
|
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at x0 using Neville's method.
    Returns the approximated value and the table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
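
# Added usage example (not in the original file): the sample points lie on
# y = x + 5, so interpolating at 5 returns 10.
# >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
# 10.0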
| 65
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """Constructs an M-CTC-T feature extractor that computes log-mel filter bank features."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts MFSC features for one unbatched waveform vector."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
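

# Added usage sketch (not part of the upstream module): two fake mono waveforms,
# padded to a common length; shapes assume the default settings above.
if __name__ == "__main__":
    extractor = MCTCTFeatureExtractor()
    waves = [np.random.randn(16_000).astype(np.float32), np.random.randn(8_000).astype(np.float32)]
    batch = extractor(waves, sampling_rate=16_000, padding="longest", return_attention_mask=True, return_tensors="np")
    print(batch["input_features"].shape)  # (2, num_frames, 80)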
| 65
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
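
# Added note (not part of the upstream __init__): with the lazy structure above,
# submodules are only imported on first attribute access, e.g.:
#
#     from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#     model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")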
| 401
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
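

# Added usage sketch (not part of the test file): the end-to-end processor call
# the tests above verify, written as user code; requires network access to
# download the checkpoint.
def _owlvit_processor_demo():
    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
    inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
    # keys: input_ids (num_queries, 16), attention_mask, pixel_values (1, 3, H, W)
    return inputs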
| 401
| 1
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
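
# Added usage sketch (not part of the original script): the equivalent direct
# call; the two Hub checkpoints below are illustrative but real identifiers.
#
#     consolidate(
#         "rag_sequence",
#         "facebook/bart-large-cnn",
#         "facebook/dpr-question_encoder-single-nq-base",
#         Path("./rag-sequence-consolidated"),
#     )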
| 705
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 334
| 0