import os

# Precompute a list of the first 100 triangular numbers.
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Count the words in words.txt whose letter-value sum is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    # A word's value is the sum of its letters' alphabetical positions (A=1, ..., Z=26).
    word_values = [sum(ord(char) - 64 for char in word) for word in words]
    return len([value for value in word_values if value in TRIANGULAR_NUMBERS])


if __name__ == "__main__":
    print(solution())
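# A quick sanity check on the word-value logic above (from the Project Euler 42
# problem statement): "SKY" scores 19 + 11 + 25 = 55, the 10th triangular number,
# so "SKY" counts as a triangular word.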
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet-50 or resnet-101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    # an image of two cats from the COCO val2017 set, used to verify the conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into our DETR structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
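# A typical invocation of the conversion script above (the script filename and
# the dump folder below are illustrative assumptions, not fixed by the code):
#
#   python convert_detr_to_pytorch.py \
#       --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./converted-detr-resnet-50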
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    """Check that `source` is within 1% (relative) of `target`."""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) , splits=[
{
'name': 'train',
'num_bytes': 2_351_563,
'num_examples': 10_000,
},
{
'name': 'validation',
'num_bytes': 238_418,
'num_examples': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    r"""
    Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
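# A minimal usage sketch of the processor above. The checkpoint name and the local
# image file are illustrative assumptions, not part of this module:
#
#   from PIL import Image
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR words/boxes come from the image processor
#   print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values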
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """
    Creates a mapping function from each choice's string representation to the actual value. Used to support multiple
    value types for a single argument.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
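# A minimal usage sketch of the parser above; `ExampleArguments` is an illustrative
# dataclass, not part of the original module.
if __name__ == "__main__":

    @dataclasses.dataclass
    class ExampleArguments:
        learning_rate: float = 3e-4
        do_train: bool = False

    example_parser = HfArgumentParser(ExampleArguments)
    # Bool fields accept a bare flag (`--do_train`) thanks to the nargs="?" / const=True handling above.
    (example_args,) = example_parser.parse_args_into_dataclasses(args=["--learning_rate", "0.1", "--do_train"])
    print(example_args)  # ExampleArguments(learning_rate=0.1, do_train=True)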
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    """
    Segment tree over a collection, combined with an associative function
    (e.g. operator.add, max, min). Supports point updates and range queries.
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index i to val."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the combined value over the inclusive range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal over all nodes of the tree."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
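# Illustrative usage sketch (added; not part of the upstream test file). The same
# checkpoint exercised by these tests can be queried end to end with the
# image-classification pipeline:
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="facebook/deit-base-distilled-patch16-224")
#   print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])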
| 166
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
a : int = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
a : Any = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
a : Any = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If it equals `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
    lowercase (bool): If `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, includes whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
        to the reference chrF++.py, NLTK and Moses implementations. If `False`,
        it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If it equals 2, the metric is referred to as chrF++,
    'beta' (int): Determines the importance of recall w.r.t. precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one list per reference set, so transpose the per-prediction lists
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
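if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the upstream
    # metric script; assumes the class can be instantiated directly, which is
    # how the datasets test suite uses metrics). Note the input shape: one
    # *list* of references per prediction, i.e. the transpose of sacrebleu's
    # native format.
    chrf = ChrF()
    result = chrf.compute(
        predictions=["The cat sat on the mat."],
        references=[["The cat is sitting on the mat."]],
        word_order=2,  # word_order=2 gives chrF++ rather than plain chrF
    )
    print(result["score"])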
| 147
|
'''simple docstring'''
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between -360 and 360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    # Maclaurin series: sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ...
    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
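    # Illustrative check (added): with the default 18 series terms the truncated
    # Maclaurin expansion converges well past 10 decimal places for small angles,
    # so these should print 1.0 and 0.5 respectively.
    print(sin(90.0))
    print(sin(30.0))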
| 174
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
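# Minimal usage sketch (added for illustration; this module uses relative
# imports, so go through an installed `transformers` instead of running it
# directly):
#   from transformers import DeiTConfig, DeiTModel
#   config = DeiTConfig()              # default DeiT-base hyperparameters
#   model = DeiTModel(config)          # randomly initialized model with this config
#   print(config.model_type, config.hidden_size, config.num_hidden_layers)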
| 137
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse over sub-modules for dotted tensor names such as "transformer.h.0.attn.weight".
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
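# Minimal usage sketch (added for illustration; this module uses relative
# imports, so go through an installed `transformers` instead of running it
# directly, and the exact module path may differ across versions). Requires
# `accelerate`; "sshleifer/tiny-gpt2" is a tiny public checkpoint:
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
#   print(get_keys_to_not_convert(model))  # typically ["lm_head"] for causal LMs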
| 137
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
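# Example invocation (added for illustration; the file name is whatever this
# script is saved as, and the model name must be known to timm):
#   python convert_swinv2.py --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256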
| 81
|
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 67
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Parameters
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
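# Worked example (added for illustration): in YOLO format the x-center is
# normalized to [0, 1], so a horizontal flip maps x_center = 0.25 to
# 1 - 0.25 = 0.75 while the class id, width and height stay unchanged:
#   [3, 0.25, 0.40, 0.10, 0.20]  ->  [3, 0.75, 0.40, 0.10, 0.20]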
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 78
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
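# Minimal usage sketch (added for illustration; this module uses relative
# imports, so load the class through an installed `transformers` instead):
#   from transformers import MgpstrTokenizer
#   tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tok("ticket")["input_ids"]          # one id per character
#   print(ids, tok.convert_ids_to_tokens(ids))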
| 78
| 1
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
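if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the upstream
    # benchmark utilities): build a tiny dummy dataset with one string column
    # and time the write with the decorator above.
    import tempfile

    features = datasets.Features({"text": datasets.Value("string")})
    with tempfile.TemporaryDirectory() as tmp_dir:
        timed_generate = get_duration(generate_example_dataset)
        seconds = timed_generate(f"{tmp_dir}/dummy.arrow", features, num_examples=10)
        print(f"wrote 10 examples in {seconds:.4f}s")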
| 239
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np",
            width=512, height=512, sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np",
            width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025,
            sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np",
            width=512, height=512, sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np",
            width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025,
            sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np",
            width=512, height=512, sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np",
            width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025,
            sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
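# Illustrative usage sketch (added; not part of the upstream test file). The
# safe pipeline class tested above is public in diffusers and exposes the
# `sld_*` knobs used throughout these tests:
#   from diffusers import StableDiffusionPipelineSafe
#   pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
#   image = pipe("a photo of an astronaut", sld_guidance_scale=2000).images[0]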
| 345
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 347
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
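    # ByT5 tokenizes at the UTF-8 byte level: the vocabulary is the 256 byte values
    # plus three special tokens (pad/eos/unk) and the extra_id sentinel tokens.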
    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs, ):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids: List[int]):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str):
        # one token per UTF-8 byte of the input string
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token: str):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index: int):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # ByT5 has no vocabulary file to save
        return ()
| 347
| 1
|
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCamelCase = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowerCamelCase = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowerCamelCase = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float'),
'references': datasets.Value('float'),
}) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(predictions, references)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 166
|
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """simple docstring"""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 166
| 1
|
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 276
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
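# The import structure below is resolved lazily: the heavy torch/TF modules are only imported when actually requested.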
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276
| 1
|
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
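# Dijkstra's two-stack algorithm evaluates a fully parenthesized infix expression
# in a single left-to-right pass, using one stack for operands and one for operators.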
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
a_ : Optional[Any] = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 137
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int], ):
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1
    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None, ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
    def __iter__(self):
        yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
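# Dataset builder that materializes a Spark DataFrame into Arrow (or Parquet) shards, one write task per Spark partition.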
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs, ):
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]
        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir")
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark
        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int, ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark
        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)
        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE__ ( self , a , a = "arrow" , a = None , a = None , **a , ) -> List[Any]:
self._validate_cache_dir()
SCREAMING_SNAKE_CASE = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
self._repartition_df_if_needed(a)
SCREAMING_SNAKE_CASE = not is_remote_filesystem(self._fs)
SCREAMING_SNAKE_CASE = os.path.join if is_local else posixpath.join
SCREAMING_SNAKE_CASE = '-TTTTT-SSSSS-of-NNNNN'
SCREAMING_SNAKE_CASE = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
SCREAMING_SNAKE_CASE = path_join(self._output_dir , a)
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for task_id, content in self._prepare_split_single(a , a , a):
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards))
all_shard_lengths.extend(a)
SCREAMING_SNAKE_CASE = total_num_examples
SCREAMING_SNAKE_CASE = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''')
if total_shards > 1:
SCREAMING_SNAKE_CASE = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
SCREAMING_SNAKE_CASE = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a , a , a , ):
rename(
a , fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''').replace('NNNNN' , f'''{total_shards:05d}''') , )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
for i in range(len(a)):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = task_id_and_num_shards[i]
for shard_id in range(a):
args.append([task_id, shard_id, global_shard_id])
global_shard_id += 1
self._spark.sparkContext.parallelize(a , len(a)).map(lambda a: _rename_shard(*a)).collect()
else:
# don't use any pattern
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , fpath.replace(a , '') , )
    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator", ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
| 137
| 1
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
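# Slack client used to post the doc-test report; CI_SLACK_BOT_TOKEN must be set in the environment.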
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    error = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            error = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[error] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3_600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
    @property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
    @property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload, )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []
        def __str__(self):
            return self.name
        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})
    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
UpperCAmelCase__ = get_job_links()
UpperCAmelCase__ = retrieve_available_artifacts()
UpperCAmelCase__ = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
UpperCAmelCase__ = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
UpperCAmelCase__ = github_actions_job_links.get("run_doctests")
UpperCAmelCase__ = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
UpperCAmelCase__ = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = handle_test_results(artifact["stats"])
UpperCAmelCase__ = failed
UpperCAmelCase__ = success
UpperCAmelCase__ = time_spent[1:-1] + ", "
UpperCAmelCase__ = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
UpperCAmelCase__ = line.replace("FAILED ", "")
UpperCAmelCase__ = line.split()[0].replace("\n", "")
if "::" in line:
UpperCAmelCase__ , UpperCAmelCase__ = line.split("::")
else:
UpperCAmelCase__ , UpperCAmelCase__ = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
UpperCAmelCase__ = docs[file_regex]
doc_test_results[category]["failed"].append(test)
UpperCAmelCase__ = all_failures[test] if test in all_failures else "N/A"
UpperCAmelCase__ = failure
break
UpperCAmelCase__ = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 26
|
from __future__ import annotations
import typing
from collections import Counter
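# Project Euler problem 39: for which perimeter p <= 1000 is the number of
# right triangles with integral side lengths maximised?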
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1_000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 26
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    """simple docstring"""
    deprecated_args = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
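    # The __init__ below maps each legacy --no_* flag onto its positive counterpart and warns before delegating to the parent dataclass.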
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1", metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        }, )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]
    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]
    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 78
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
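# Image processor that optionally converts inputs to RGB, resizes them to a square
# size (default 384x384), rescales pixel values to [0, 1] and normalizes with the CLIP mean/std.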
class BlipImageProcessor(BaseImageProcessor):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
| 78
| 1
|
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
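# A Lorentz boost along the x-axis: beta = v/c and gamma = 1/sqrt(1 - beta^2)
# below follow the standard special-relativity definitions.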
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c
def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])
def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")
    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
print(F"""\n{numerical_vector}""")
| 136
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
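# Round-trip tests for CLIPSegProcessor: saving/loading, and consistency with the
# underlying tokenizer and image processor.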
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 136
| 1
|
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
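# The model tester below builds random token ids, (legalised) bounding boxes and
# pixel values sized for a tiny LayoutLMv3 configuration.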
class LayoutLMvaModelTester:
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=99 , UpperCAmelCase_ : Tuple=36 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Any=6 , UpperCAmelCase_ : int=6 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=1_000 , ) ->int:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = text_seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = coordinate_size
snake_case_ = shape_size
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ = text_seq_length
snake_case_ = (image_size // patch_size) ** 2 + 1
snake_case_ = self.text_seq_length + self.image_seq_length
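# Worked example (illustrative, not from the original file): with image_size=224 and
# patch_size=16, the image contributes (224 // 16) ** 2 + 1 = 197 positions (196 patches
# plus the CLS token), so with text_seq_length=512 the full sequence length is 709.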
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = LayoutLMvaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
# text + image
snake_case_ = model(UpperCAmelCase_ , pixel_values=UpperCAmelCase_ )
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ = model(pixel_values=UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LayoutLMvaForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LayoutLMvaForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
snake_case_ = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = False
__lowercase: str = False
__lowercase: Any = False
__lowercase: Union[str, Any] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase: Any = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ) ->Optional[int]:
"""simple docstring"""
return True
def lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
snake_case_ = LayoutLMvaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int=False ) ->List[str]:
"""simple docstring"""
snake_case_ = copy.deepcopy(UpperCAmelCase_ )
if model_class in get_values(UpperCAmelCase_ ):
snake_case_ = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCAmelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase_ ):
snake_case_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
elif model_class in get_values(UpperCAmelCase_ ):
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
elif model_class in [
*get_values(UpperCAmelCase_ ),
]:
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
elif model_class in [
*get_values(UpperCAmelCase_ ),
]:
snake_case_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase_ , )
return inputs_dict
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LayoutLMvaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _a ( ) -> Optional[Any]:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_ ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
snake_case_ = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values.to(UpperCAmelCase_ )
snake_case_ = torch.tensor([[1, 2]] )
snake_case_ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
snake_case_ = model(
input_ids=input_ids.to(UpperCAmelCase_ ) , bbox=bbox.to(UpperCAmelCase_ ) , pixel_values=pixel_values.to(UpperCAmelCase_ ) , )
# verify the logits
snake_case_ = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 347
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE
def _a ( ) -> List[str]:
snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" )
snake_case_ = parser.parse_args()
return args
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
try:
if args.tpu_name:
snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE )
tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE )
return tpu
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = 0
for file in file_list:
snake_case_ = file.split("""/""" )[-1]
snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 )
snake_case_ = int(_SCREAMING_SNAKE_CASE )
num_samples += sample_count
return num_samples
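# Example (illustrative; assumes the shard-naming scheme "<name>-<shard>-<count>.tfrecord"
# and the original function name count_samples, both placeholders here): for the file list
# ["gs://bucket/wiki-00000-4096.tfrecord", "gs://bucket/wiki-00001-4096.tfrecord"]
# the regex extracts 4096 from each filename, so the function returns 8192.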
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
snake_case_ = count_samples(_SCREAMING_SNAKE_CASE )
snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE )
if shuffle:
snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) )
snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) )
snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE )
if shuffle:
assert shuffle_buffer_size is not None
snake_case_ = dataset.shuffle(args.shuffle_buffer_size )
snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE )
snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE )
snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE )
return dataset
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
if not args.no_tpu:
snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE )
snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer )
snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config )
snake_case_ = tokenizer.vocab_size
snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
snake_case_ = count_samples(_SCREAMING_SNAKE_CASE )
snake_case_ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
snake_case_ = steps_per_epoch * args.num_epochs
with strategy.scope():
snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
snake_case_ , snake_case_ = create_optimizer(
num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] )
def decode_fn(_SCREAMING_SNAKE_CASE ):
snake_case_ = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
snake_case_ = DataCollatorForLanguageModeling(
tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
def mask_with_collator(_SCREAMING_SNAKE_CASE ):
# TF really needs an isin() function
snake_case_ = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
snake_case_ , snake_case_ = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , )
return batch
snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync
snake_case_ = prepare_dataset(
_SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , )
snake_case_ = prepare_dataset(
_SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , )
snake_case_ = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) )
model.fit(
_SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args()
main(args)
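# Typical invocation (illustrative sketch; the script filename and GCS paths below are
# placeholders, not taken from the original file):
# python train_mlm_tpu.py \
#     --tokenizer unigram-tokenizer-wikitext \
#     --train_dataset gs://my-bucket/train \
#     --eval_dataset gs://my-bucket/eval \
#     --output_dir ./mlm-checkpoints \
#     --bfloat16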
| 347
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
__UpperCamelCase : int = logging.getLogger(__name__)
__UpperCamelCase : Any = {"facebook/bart-base": BartForConditionalGeneration}
__UpperCamelCase : List[str] = {"facebook/bart-base": BartTokenizer}
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : str = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=SCREAMING_SNAKE_CASE , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=SCREAMING_SNAKE_CASE , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=SCREAMING_SNAKE_CASE , )
parser.add_argument(
'''--config_name''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=SCREAMING_SNAKE_CASE , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''Where to store the final ONNX file.''' )
UpperCamelCase__ : Union[str, Any] = parser.parse_args()
return args
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any]="cpu" ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE )
if model_name in ["facebook/bart-base"]:
UpperCamelCase__ : Optional[Any] = 0
UpperCamelCase__ : List[Any] = None
UpperCamelCase__ : List[str] = 0
return huggingface_model, tokenizer
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
model.eval()
UpperCamelCase__ : List[str] = None
UpperCamelCase__ : Dict = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) )
with torch.no_grad():
UpperCamelCase__ : List[Any] = '''My friends are cool but they eat too many carbs.'''
UpperCamelCase__ : Tuple = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device )
UpperCamelCase__ : List[str] = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
SCREAMING_SNAKE_CASE , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=SCREAMING_SNAKE_CASE , )
logger.info('''Model exported to {}'''.format(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : List[Any] = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : int = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = ort_sess.run(
SCREAMING_SNAKE_CASE , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(SCREAMING_SNAKE_CASE ),
'''max_length''': np.array(SCREAMING_SNAKE_CASE ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Dict = parse_args()
UpperCamelCase__ : Dict = 5
UpperCamelCase__ : int = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
UpperCamelCase__ : str = torch.device(args.device )
UpperCamelCase__ , UpperCamelCase__ = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(SCREAMING_SNAKE_CASE )
if args.max_length:
UpperCamelCase__ : int = args.max_length
if args.num_beams:
UpperCamelCase__ : List[str] = args.num_beams
if args.output_file_path:
UpperCamelCase__ : Any = args.output_file_path
else:
UpperCamelCase__ : Dict = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
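# Typical invocation (illustrative sketch; the script filename is a placeholder):
# python run_onnx_exporter.py \
#     --model_name_or_path facebook/bart-base \
#     --num_beams 4 --max_length 5 \
#     --output_file_path BART.onnx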
| 51
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( __lowerCAmelCase):
A: Optional[int] = ["image_processor", "tokenizer"]
A: int = "FlavaImageProcessor"
A: List[str] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : int ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCamelCase__ , )
UpperCamelCase__ : Union[str, Any] = kwargs.pop('''feature_extractor''' )
UpperCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : List[str] = self.image_processor
def __call__( self : int , lowerCamelCase__ : Optional[ImageInput] = None , lowerCamelCase__ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = False , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : List[str] , ) -> Any:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCamelCase__ : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
if images is not None:
UpperCamelCase__ : Optional[int] = self.image_processor(
lowerCamelCase__ , return_image_mask=lowerCamelCase__ , return_codebook_pixels=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
if text is not None and images is not None:
encoding.update(lowerCamelCase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : int ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : Dict , *lowerCamelCase__ : Dict , **lowerCamelCase__ : int ) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.tokenizer.model_input_names
UpperCamelCase__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCamelCase__ , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCamelCase__ , )
return self.image_processor
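# Minimal usage sketch (illustrative; assumes the class above is exposed as FlavaProcessor
# and that the "facebook/flava-full" checkpoint is available):
# from transformers import FlavaProcessor
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# inputs then holds both tokenizer outputs (input_ids, attention_mask) and image features.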
| 51
| 1
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A__: Optional[int] = random.Random()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : List[Any]=1.0 ,_UpperCAmelCase : List[str]=None ,_UpperCAmelCase : List[str]=None ) -> Optional[int]:
if rng is None:
_a : Optional[int] =global_rng
_a : str =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
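# Example (illustrative; the original helper is named floats_list): floats_list((2, 3))
# returns a 2x3 nested list of floats in [0, 1) drawn from the module-level RNG, e.g.
# [[0.64, 0.02, 0.55], [0.22, 0.81, 0.09]].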
@require_torch
@require_torchaudio
class A__ ( unittest.TestCase ):
def __init__( self :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Tuple=7 , SCREAMING_SNAKE_CASE :Union[str, Any]=4_0_0 , SCREAMING_SNAKE_CASE :str=2_0_0_0 , SCREAMING_SNAKE_CASE :str=1_0 , SCREAMING_SNAKE_CASE :Tuple=1_6_0 , SCREAMING_SNAKE_CASE :List[str]=8 , SCREAMING_SNAKE_CASE :Optional[int]=0.0 , SCREAMING_SNAKE_CASE :Union[str, Any]=4_0_0_0 , SCREAMING_SNAKE_CASE :Optional[int]=False , SCREAMING_SNAKE_CASE :List[str]=True , ) -> int:
'''simple docstring'''
_a : List[Any] =parent
_a : Optional[Any] =batch_size
_a : int =min_seq_length
_a : Tuple =max_seq_length
_a : List[str] =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] =padding_value
_a : int =sampling_rate
_a : List[str] =return_attention_mask
_a : Union[str, Any] =do_normalize
_a : Tuple =feature_size
_a : str =chunk_length
_a : Any =hop_length
def __UpperCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :List[str]=False , SCREAMING_SNAKE_CASE :Any=False ) -> List[Any]:
'''simple docstring'''
def _flatten(SCREAMING_SNAKE_CASE :Optional[int] ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE ) )
if equal_length:
_a : Dict =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : Optional[Any] =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str =[np.asarray(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = WhisperFeatureExtractor if is_speech_available() else None
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
_a : Optional[int] =WhisperFeatureExtractionTester(self )
def __UpperCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : int =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : List[Any] =feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE )[0]
check_json_file_has_correct_format(SCREAMING_SNAKE_CASE )
_a : Optional[Any] =self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE )
_a : Optional[Any] =feat_extract_first.to_dict()
_a : Optional[int] =feat_extract_second.to_dict()
_a : str =feat_extract_first.mel_filters
_a : List[str] =feat_extract_second.mel_filters
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
_a : Any =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : List[Any] =os.path.join(SCREAMING_SNAKE_CASE , """feat_extract.json""" )
feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE )
_a : Dict =self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE )
_a : int =feat_extract_first.to_dict()
_a : Any =feat_extract_second.to_dict()
_a : List[str] =feat_extract_first.mel_filters
_a : List[Any] =feat_extract_second.mel_filters
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
# Tests that all calls wrap to encode_plus and batch_encode_plus
_a : str =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_a : List[str] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : str =[np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
_a : Any =feature_extractor(SCREAMING_SNAKE_CASE , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_a : Dict =feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_a : Optional[Any] =feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
_a : Optional[int] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
_a : Optional[int] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_a : int =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : List[str] =np.asarray(SCREAMING_SNAKE_CASE )
_a : Optional[Any] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
_a : Optional[Any] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test truncation required
_a : Dict =[floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_a : Dict =[np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
_a : Union[str, Any] =[x[: feature_extractor.n_samples] for x in speech_inputs]
_a : Dict =[np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs_truncated]
_a : Dict =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
_a : Optional[int] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __UpperCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
import torch
_a : List[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : int =np.random.rand(1_0_0 , 3_2 ).astype(np.float32 )
_a : Optional[int] =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_a : str =feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
_a : Union[str, Any] =feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Tuple ) -> Union[str, Any]:
'''simple docstring'''
_a : Optional[Any] =load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_a : Optional[Any] =ds.sort("""id""" ).select(range(SCREAMING_SNAKE_CASE ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def __UpperCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
# fmt: off
_a : Any =torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
_a : Any =self._load_datasamples(1 )
_a : int =WhisperFeatureExtractor()
_a : str =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
def __UpperCAmelCase ( self :Any ) -> Any:
'''simple docstring'''
_a : Dict =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : str =self._load_datasamples(1 )[0]
_a : List[Any] =((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_a : Union[str, Any] =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=SCREAMING_SNAKE_CASE )[0]
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE ) - 1 ) < 1e-3 ) )
| 276
|
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[str] ) -> int:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
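# Worked example (illustrative; the original function is named normalize_box): a box
# [10, 20, 110, 220] in a 1000x2000 image maps to LayoutLM's 0-1000 coordinate space as
# [10, 10, 110, 110], since each x is scaled by 1000/width and each y by 1000/height.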
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : np.ndarray ,_UpperCAmelCase : Optional[str] ,_UpperCAmelCase : Optional[str] = None ) -> Optional[int]:
_a : Any =tesseract_config if tesseract_config is not None else """"""
# apply OCR
_a : Optional[Any] =to_pil_image(_UpperCAmelCase )
_a , _a =pil_image.size
_a : List[str] =pytesseract.image_to_data(_UpperCAmelCase ,lang=_UpperCAmelCase ,output_type="""dict""" ,config=_UpperCAmelCase )
_a , _a , _a , _a , _a =data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
_a : Tuple =[idx for idx, word in enumerate(_UpperCAmelCase ) if not word.strip()]
_a : List[Any] =[word for idx, word in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
_a : Dict =[coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
_a : List[str] =[coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
_a : Union[str, Any] =[coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
_a : Union[str, Any] =[coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : List[str] =[]
for x, y, w, h in zip(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ):
_a : int =[x, y, x + w, y + h]
actual_boxes.append(_UpperCAmelCase )
# finally, normalize the bounding boxes
_a : str =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : List[Any] = ["pixel_values"]
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[str] = "" , **SCREAMING_SNAKE_CASE :Tuple , ) -> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : List[Any] =size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
_a : Tuple =get_size_dict(SCREAMING_SNAKE_CASE )
_a : Dict =do_resize
_a : Tuple =size
_a : str =resample
_a : Dict =apply_ocr
_a : Union[str, Any] =ocr_lang
_a : Dict =tesseract_config
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :Dict , ) -> np.ndarray:
'''simple docstring'''
_a : int =get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
_a : Any =(size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :ImageInput , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :PILImageResampling = None , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE :ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE :Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
_a : Optional[int] =do_resize if do_resize is not None else self.do_resize
_a : Optional[int] =size if size is not None else self.size
_a : str =get_size_dict(SCREAMING_SNAKE_CASE )
_a : List[str] =resample if resample is not None else self.resample
_a : int =apply_ocr if apply_ocr is not None else self.apply_ocr
_a : str =ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Union[str, Any] =tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[str] =make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
_a : List[Any] =[to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
_a : Any =[]
_a : Any =[]
for image in images:
_a , _a =apply_tesseract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
words_batch.append(SCREAMING_SNAKE_CASE )
boxes_batch.append(SCREAMING_SNAKE_CASE )
if do_resize:
_a : Union[str, Any] =[self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_a : Dict =[flip_channel_order(SCREAMING_SNAKE_CASE ) for image in images]
_a : str =[to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
_a : str =BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE )
if apply_ocr:
_a : List[Any] =words_batch
_a : Dict =boxes_batch
return data
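# Minimal usage sketch (illustrative; assumes this is LayoutLMv2ImageProcessor and that
# pytesseract is installed for the OCR step):
# processor = LayoutLMv2ImageProcessor(apply_ocr=True)
# encoding = processor(image, return_tensors="pt")
# encoding then holds "pixel_values" (BGR, resized) plus the OCR "words" and "boxes".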
| 276
| 1
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[float] ) -> bool:
"""simple docstring"""
if len(UpperCAmelCase__ ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
UpperCamelCase :List[Any] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
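# Worked example (illustrative): [3, 4, 5] can close into a polygon because 5 < 3 + 4,
# while [3, 4, 9] cannot, since the longest side 9 >= 3 + 4.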
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371
|
import string
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
UpperCamelCase :List[str] = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCamelCase :Optional[Any] = string.ascii_uppercase.find(__magic_name__ )
UpperCamelCase :Any = num - key
if num < 0:
UpperCamelCase :Optional[Any] = num + len(string.ascii_uppercase )
UpperCamelCase :int = translated + string.ascii_uppercase[num]
else:
UpperCamelCase :Optional[Any] = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
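# Worked example (illustrative): brute-forcing the ciphertext "KHOOR" prints one candidate
# per key; at key 3 each letter shifts back three places (K->H, H->E, O->L, R->O),
# revealing "HELLO".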
def SCREAMING_SNAKE_CASE_ ( ) -> None:
"""simple docstring"""
UpperCamelCase :List[Any] = input("""Encrypted message: """ )
UpperCamelCase :Optional[Any] = message.upper()
decrypt(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 62
| 0
|
def lowerCAmelCase_ ( snake_case_ ):
_A : str = [0 for i in range(len(snake_case_ ) )]
# initialize interval's left pointer and right pointer
_A , _A = 0, 0
for i in range(1,len(snake_case_ ) ):
# case when current index is inside the interval
if i <= right_pointer:
_A : str = min(right_pointer - i + 1,z_result[i - left_pointer] )
_A : Optional[int] = min_edge
while go_next(snake_case_,snake_case_,snake_case_ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
_A , _A = i, i + z_result[i] - 1
return z_result
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
return i + z_result[i] < len(snake_case_ ) and s[z_result[i]] == s[i + z_result[i]]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[Any] = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
_A : Optional[Any] = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(snake_case_ ):
answer += 1
return answer
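# Worked example (illustrative): searching for "abr" in "abracadabra" concatenates the two
# strings, computes the Z-array of "abrabracadabra", and counts entries >= 3 (the pattern
# length); "abr" occurs at indices 0 and 7 of the text, so the count is 2.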
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowercase ( UpperCamelCase__ ):
_a = "fnet"
def __init__( self , _a=3_2000 , _a=768 , _a=12 , _a=3072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1e-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ) -> int:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Any = vocab_size
_A : str = max_position_embeddings
_A : Optional[Any] = hidden_size
_A : List[str] = num_hidden_layers
_A : List[str] = intermediate_size
_A : List[Any] = hidden_act
_A : List[str] = hidden_dropout_prob
_A : List[str] = initializer_range
_A : List[Any] = type_vocab_size
_A : List[Any] = layer_norm_eps
_A : List[str] = use_tpu_fourier_optimizations
_A : str = tpu_short_seq_length
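# Minimal usage sketch (illustrative; assumes the class above is exposed as FNetConfig):
# from transformers import FNetConfig, FNetModel
# config = FNetConfig(vocab_size=32000, hidden_size=768)
# model = FNetModel(config)  # randomly initialized FNet with Fourier mixing layers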
| 26
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowercase : Dict = StableDiffusionInpaintPipeline
lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase : Optional[int] = frozenset([] )
def a__ ( self :Any ):
torch.manual_seed(0 )
snake_case_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,)
snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
snake_case_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,)
snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase )
snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
snake_case_ : List[str] = Image.fromarray(np.uint8(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Optional[Any] = Image.fromarray(np.uint8(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_UpperCamelCase ).startswith("""mps""" ):
snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
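# Usage sketch (illustrative, not part of the test suite): end-to-end inpainting with the
# same public checkpoint the integration tests use. The image paths are placeholders; any
# RGB image and a binary mask of matching size will do.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

inpaint_pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
source = load_image("init_image.png")  # placeholder path
mask = load_image("mask.png")          # placeholder path
result = inpaint_pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=source,
    mask_image=mask,
).images[0]
result.save("inpainted.png")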
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
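# Illustrative chunking arithmetic for the properties above: with chunk_length_s=1.0 and
# overlap=0.01 at 24 kHz (example values, not necessarily a checkpoint's defaults),
# chunk_length = int(1.0 * 24_000) = 24_000 samples and
# chunk_stride = max(1, int(0.99 * 24_000)) = 23_760 samples.
import numpy as np

extractor = EncodecFeatureExtractor(sampling_rate=24_000, chunk_length_s=1.0, overlap=0.01)
audio = np.random.randn(30_000).astype(np.float32)  # 1.25 s of mono audio
features = extractor(raw_audio=audio, sampling_rate=24_000, return_tensors="np")
print(features["input_values"].shape)  # (1, 1, 47760): padded up to a whole number of chunks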
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """A callback that re-evaluates on the train set whenever evaluation is triggered."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
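# Quick sanity check of `compute_metrics` on toy logits (illustrative, run inside this module):
#
#   toy_logits = np.array([[0.1, 0.9], [0.8, 0.2]])
#   toy_labels = np.array([1, 0])
#   compute_metrics((toy_logits, toy_labels))  # -> {"accuracy": 1.0}, since argmax matches both labels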
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
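# Illustrative: with `_LazyModule` installed in `sys.modules`, importing the package is cheap
# and the heavy submodules are only loaded on first attribute access, e.g.:
#
#   import transformers.models.plbart as plbart   # no torch/sentencepiece import yet
#   config = plbart.PLBartConfig()                # first access triggers the real import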
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=2_048,
        hidden_size=2_048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
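# Illustrative expansion of the default `attention_types`: one entry per layer, alternating.
assert GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]]) == ["global", "local"] * 12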
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-exportable helper: largest divisor of `seq_length` below `window_size`, plus block count."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
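# Sanity sketch for the two ONNX-friendly helpers above (illustrative):
import torch

_x = torch.arange(10).view(1, 10)
assert torch.equal(custom_unfold(_x, dimension=1, size=4, step=2), _x.unfold(1, 4, 2))
_block_length, _num_blocks = custom_get_block_length_and_num_blocks(10, 7)
assert int(_block_length) == 5 and int(_num_blocks) == 2  # 5 divides 10, giving 2 blocks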
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
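# Illustrative shape check for the dummy `past_key_values` built above: with the default
# config (16 heads, hidden size 2048) and a hypothetical batch of 2 with sequence length 5,
# each past key/value tensor has shape (batch, num_heads, seqlen + 2, head_dim):
#
#   (2, 16, 5 + 2, 2048 // 16) == (2, 16, 7, 128)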
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
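# Illustrative: `batch_decode` keeps, per example, whichever head (char/bpe/wp) is most
# confident. With toy confidences and hypothetical decoded strings:
toy_scores = [0.91, 0.72, 0.88]            # char, bpe, wp confidences for one example
toy_strs = ["ticket", "tlcket", "tickot"]  # hypothetical decodes
print(toy_strs[toy_scores.index(max(toy_scores))])  # -> "ticket"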
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b by Jacobi iteration; A must be strictly diagonally dominant."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that each diagonal entry strictly dominates the sum of the other row entries."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
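# Worked example (illustrative): solve 4x + y = 2, x + 3y = -6. The exact solution is
# x = 12/11 ≈ 1.0909 and y = -26/11 ≈ -2.3636, which the iterates converge toward.
coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[2.0], [-6.0]])
print(jacobi_iteration_method(coefficient, constant, [0, 0], 25))  # ≈ [1.0909, -2.3636]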
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets into a single one, alternating between the sources."""
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets into a single one."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
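# Usage sketch against the public `datasets` API (toy data; interleaving with probabilities
# is stochastic but reproducible via `seed`):
#
#   from datasets import Dataset, interleave_datasets, concatenate_datasets
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#   both = concatenate_datasets([d1, d2])
#   len(both)  # 6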
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: jnp.ndarray,
    ) -> Tuple[jnp.ndarray, float]:
        """Explicit Langevin-like "churn" step: add noise to reach sigma_hat = sigma + gamma * sigma."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Correct the predicted sample based on the model output of the network (second-order step)."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
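# Minimal sampling-loop sketch (illustrative; `unet_apply` is a hypothetical stand-in for a
# real Flax denoiser call, and the sample shape is arbitrary). It mirrors how a Karras VE
# pipeline walks the schedule from the noisiest sigma down to zero:
#
#   import jax
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.set_timesteps(scheduler.create_state(), num_inference_steps=50)
#   key = jax.random.PRNGKey(0)
#   sample = jax.random.normal(key, (1, 3, 32, 32)) * state.schedule[0]
#   for t in state.timesteps:
#       sigma = state.schedule[t]
#       sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
#       key, step_key = jax.random.split(key)
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, step_key)
#       model_output = unet_apply(sample_hat, sigma_hat)  # hypothetical denoiser call
#       sample = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat).prev_sample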
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
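# Illustrative output of the templates above:
#
#   >>> print(create_dummy_object("UNet2DModel", '["torch"]'))
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#       ...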
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
lowerCamelCase_ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
lowerCamelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
lowerCamelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
lowerCamelCase_ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
lowerCamelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
lowerCamelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
lowerCamelCase_ = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(lowercase )
@slow
@require_tf
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
lowerCamelCase_ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase ) , [
{"sequence": "My name is John", "score": 0.0_0_8, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_0_7, "token": 1573, "token_str": " Chris"},
] , )
lowerCamelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_5_1,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_1_4,
"token": 12790,
"token_str": " Lyon",
},
] , )
lowerCamelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase ) , [
{"sequence": "My name is Patrick", "score": 0.0_0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_0_0, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_0_0, "token": 2941, "token_str": " Te"},
] , )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Dict:
lowerCamelCase_ = fill_masker.tokenizer
lowerCamelCase_ = fill_masker.model
lowerCamelCase_ = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
lowercase , [
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
] , )
with self.assertRaises(lowercase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(lowercase ):
fill_masker("This is" )
self.run_test_top_k(lowercase , lowercase )
self.run_test_targets(lowercase , lowercase )
self.run_test_top_k_targets(lowercase , lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(lowercase , lowercase )
self.fill_mask_with_multiple_masks(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Dict:
lowerCamelCase_ = tokenizer.get_vocab()
lowerCamelCase_ = sorted(vocab.keys() )[:2]
# Pipeline argument
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase , targets=lowercase )
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase )
lowerCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase ) )
# Call argument
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase )
lowerCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase ) )
# Score equivalence
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase )
lowerCamelCase_ = [top_mask["token_str"] for top_mask in outputs]
lowerCamelCase_ = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase ) == set(lowercase ):
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase )
lowerCamelCase_ = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase ) , nested_simplify(lowercase ) )
# Raises with invalid
with self.assertRaises(lowercase ):
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase ):
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""] )
with self.assertRaises(lowercase ):
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets="" )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> List[Any]:
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase , top_k=2 )
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
self.assertEqual(nested_simplify(lowercase ) , nested_simplify(lowercase ) )
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets_order = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets_order).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets_order)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> List[Any]:
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = tokenizer.get_vocab()
# String duplicates + id duplicates
lowerCamelCase_ = sorted(vocab.keys() )[:3]
lowerCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
lowerCamelCase_ = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase , top_k=10 )
        # The target list contains duplicates, so the pipeline can't return
        # more results than the number of unique targets
self.assertEqual(len(lowercase ) , 3 )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Dict:
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
lowercase , [
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
] , )
| 19
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = TransfoXLTokenizer
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
def _a ( self ) -> Union[str, Any]:
super().setUp()
__UpperCamelCase =[
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _a ( self , **A_ ) -> Optional[int]:
__UpperCamelCase =True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase ='<unk> UNwanted , running'
__UpperCamelCase ='<unk> unwanted, running'
return input_text, output_text
def _a ( self ) -> str:
__UpperCamelCase =TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A_ )
__UpperCamelCase =tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(A_ , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [0, 4, 8, 7] )
def _a ( self ) -> Any:
__UpperCamelCase =TransfoXLTokenizer(lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =TransfoXLTokenizer(lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> int:
__UpperCamelCase =TransfoXLTokenizer(lower_case=A_ )
__UpperCamelCase ='Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
__UpperCamelCase =[
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(A_ ) , A_ )
self.assertEqual(tokenizer.convert_tokens_to_string(A_ ) , A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =len(A_ )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(A_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
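# A minimal usage sketch (not part of the test file) of the behaviour the last
# test exercises, assuming a tokenizer built from the toy vocab in setUp():
#
#     tokenizer.add_tokens(["new1", "new2"])   # appended at the end of the vocab
#     tokenizer.move_added_token("new1", 1)    # relocated to id 1, not copied
#     assert tokenizer.encode("new1") == [1]
#     assert tokenizer.decode([1]) == "new1"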
| 62
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case : List[str] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : str = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_snake_case : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case : int = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = ['YolosFeatureExtractor']
_snake_case : Optional[int] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE : List[str] = frozenset([] )
def snake_case__( self : int ) ->Optional[Any]:
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCamelCase , )
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
snake_case_ = CLIPTextModel(_UpperCamelCase )
snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__( self : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any]=0 ) ->List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert('''RGB''' ).resize((6_4, 6_4) )
snake_case_ = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((6_4, 6_4) )
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__( self : Union[str, Any] ) ->Optional[Any]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**_UpperCamelCase )
snake_case_ = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = sd_pipe(**_UpperCamelCase ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__( self : str ) ->int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[int] ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
snake_case_ = '''stabilityai/stable-diffusion-2-inpainting'''
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase , safety_checker=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , generator=_UpperCamelCase , output_type='''np''' , )
snake_case_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def snake_case__( self : str ) ->Optional[int]:
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
snake_case_ = '''stabilityai/stable-diffusion-2-inpainting'''
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCamelCase , torch_dtype=torch.floataa , safety_checker=_UpperCamelCase , )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , generator=_UpperCamelCase , output_type='''np''' , )
snake_case_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def snake_case__( self : Union[str, Any] ) ->int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
snake_case_ = '''stabilityai/stable-diffusion-2-inpainting'''
snake_case_ = PNDMScheduler.from_pretrained(_UpperCamelCase , subfolder='''scheduler''' )
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCamelCase , safety_checker=_UpperCamelCase , scheduler=_UpperCamelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type='''np''' , )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 8
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Any = LxmertTokenizer
def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars
):
snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) )
snake_case_ = do_lower_case
snake_case_ = strip_accents
snake_case_ = tokenize_chinese_chars
snake_case_ = normalizer_class(**_UpperCamelCase )
snake_case_ = do_lower_case
def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]:
snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
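# Sketch of the special-token layout the two methods above produce for a
# sentence pair, mirroring the standard BERT scheme (ids shown symbolically):
#
#     tokens:          [CLS] A1 ... An [SEP] B1 ... Bm [SEP]
#     token_type_ids:    0   0  ...  0   0    1  ...  1   1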
| 8
| 1
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCamelCase :
"""simple docstring"""
pass
| 227
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps : int ,max_beta : float = 0.999 ,alpha_transform_type : str = "cosine" ,):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t : float ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t : float ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) ,max_beta ) )
    return torch.tensor(betas ,dtype=torch.float32 )
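# Illustrative consequence of the schedule above (an informal check, not part of
# the library code): with the default "cosine" transform each beta is
# 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), capped at max_beta, so the
# values start near zero and reach the cap at the final timestep, e.g.
#
#     betas = betas_for_alpha_bar(1000)
#     assert betas[0] < betas[-1] <= 0.999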
class __lowerCamelCase ( a_ , a_ ):
"""simple docstring"""
a = [e.name for e in KarrasDiffusionSchedulers]
a = 2
@register_to_config
def __init__( self : int , SCREAMING_SNAKE_CASE : int = 1000 , SCREAMING_SNAKE_CASE : float = 0.0_0085 , SCREAMING_SNAKE_CASE : float = 0.012 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : str = "linspace" , SCREAMING_SNAKE_CASE : int = 0 , ):
if trained_betas is not None:
_A : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa)
elif beta_schedule == "linear":
_A : List[Any] = torch.linspace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_A : Any = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_A : Optional[Any] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE)
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}')
_A : Any = 1.0 - self.betas
_A : List[Any] = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any=None):
if schedule_timesteps is None:
_A : Dict = self.timesteps
_A : List[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
_A : Dict = 1 if len(SCREAMING_SNAKE_CASE) > 1 else 0
else:
_A : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep
_A : int = self._index_counter[timestep_int]
return indices[pos].item()
@property
def A ( self : Optional[Any]):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ):
_A : Tuple = self.index_for_timestep(SCREAMING_SNAKE_CASE)
if self.state_in_first_order:
_A : Any = self.sigmas[step_index]
else:
_A : int = self.sigmas_interpol[step_index]
_A : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def A ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , ):
_A : Optional[Any] = num_inference_steps
_A : int = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_A : Tuple = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE)[::-1].copy()
elif self.config.timestep_spacing == "leading":
_A : Optional[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_A : int = (np.arange(0 , SCREAMING_SNAKE_CASE) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_A : List[str] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_A : str = (np.arange(SCREAMING_SNAKE_CASE , 0 , -step_ratio)).round().copy().astype(SCREAMING_SNAKE_CASE)
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.')
_A : List[str] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
_A : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE)
_A : str = np.interp(SCREAMING_SNAKE_CASE , np.arange(0 , len(SCREAMING_SNAKE_CASE)) , SCREAMING_SNAKE_CASE)
_A : str = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
_A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(device=SCREAMING_SNAKE_CASE)
# interpolate sigmas
_A : Optional[int] = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
_A : Any = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
_A : List[Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
if str(SCREAMING_SNAKE_CASE).startswith('mps'):
# mps does not support float64
_A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=torch.floataa)
else:
_A : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
# interpolate timesteps
_A : Optional[int] = self.sigma_to_t(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=timesteps.dtype)
_A : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
_A : Optional[Any] = torch.cat([timesteps[:1], interleaved_timesteps])
_A : str = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_A : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE)
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]):
# get log sigma
_A : Dict = sigma.log()
# get distribution
_A : Any = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_A : Tuple = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
_A : Union[str, Any] = low_idx + 1
_A : Dict = self.log_sigmas[low_idx]
_A : List[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
_A : Dict = (low - log_sigma) / (low - high)
_A : Union[str, Any] = w.clamp(0 , 1)
# transform interpolation to time range
_A : int = (1 - w) * low_idx + w * high_idx
_A : Any = t.view(sigma.shape)
return t
@property
def A ( self : Any):
return self.sample is None
def A ( self : int , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : bool = True , ):
_A : Optional[int] = self.index_for_timestep(SCREAMING_SNAKE_CASE)
# advance index counter by 1
_A : Dict = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_A : Tuple = self.sigmas[step_index]
_A : Dict = self.sigmas_interpol[step_index + 1]
_A : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_A : int = self.sigmas[step_index - 1]
_A : Union[str, Any] = self.sigmas_interpol[step_index]
_A : Dict = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_A : List[Any] = 0
_A : Dict = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_A : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
_A : Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = sigma_hat if self.state_in_first_order else sigma_interpol
_A : Union[str, Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample')
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`')
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_A : int = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_A : str = sigma_interpol - sigma_hat
# store for 2nd order step
_A : List[str] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_A : List[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_A : Optional[int] = sigma_next - sigma_hat
_A : str = self.sample
_A : Any = None
_A : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE)
def A ( self : Any , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_A : str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE):
# mps does not support float64
_A : Any = self.timesteps.to(original_samples.device , dtype=torch.floataa)
_A : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
_A : str = self.timesteps.to(original_samples.device)
_A : str = timesteps.to(original_samples.device)
_A : int = [self.index_for_timestep(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) for t in timesteps]
_A : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
_A : List[Any] = sigma.unsqueeze(-1)
_A : Dict = original_samples + noise * sigma
return noisy_samples
def __len__( self : List[Any]):
return self.config.num_train_timesteps
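# Note on the stepping scheme implemented above: the scheduler alternates
# between a first-order step (sigma -> sigma_interpol, stashing `sample`) and a
# second-order correction (sigma_interpol -> sigma_next, consuming the stash),
# which is why `state_in_first_order` simply checks `self.sample is None`.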
| 227
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""gpt_neo"""
a_ =["""past_key_values"""]
a_ ={"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , __UpperCAmelCase=50257 , __UpperCAmelCase=2048 , __UpperCAmelCase=2048 , __UpperCAmelCase=24 , __UpperCAmelCase=[[["global", "local"], 12]] , __UpperCAmelCase=16 , __UpperCAmelCase=None , __UpperCAmelCase=256 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , **__UpperCAmelCase , )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_layers
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = window_size
lowerCAmelCase__ = activation_function
lowerCAmelCase__ = resid_dropout
lowerCAmelCase__ = embed_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = classifier_dropout
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = attention_types
lowerCAmelCase__ = self.expand_attention_types_params(__UpperCAmelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
F"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
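# Worked example for the expansion above: the default
# attention_types=[[["global", "local"], 12]] repeats the ["global", "local"]
# pair 12 times, i.e. 24 alternating layer types, matching num_layers=24.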
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] ) -> Any:
"""simple docstring"""
import torch
lowerCAmelCase__ = input.size()
lowerCAmelCase__ = len(UpperCamelCase_ )
lowerCAmelCase__ = shape[dimension]
lowerCAmelCase__ = torch.arange(0 , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = torch.div(sizedim - size , UpperCamelCase_ , rounding_mode="floor" ) + 1
lowerCAmelCase__ = torch.arange(UpperCamelCase_ ) + low_indices[:min_length][:, None]
lowerCAmelCase__ = [slice(UpperCamelCase_ )] * rank
lowerCAmelCase__ = indices
lowerCAmelCase__ = input[s]
lowerCAmelCase__ = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase_ )
def _a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str ) -> int:
"""simple docstring"""
import torch
lowerCAmelCase__ = torch.arange(1 , UpperCamelCase_ )
lowerCAmelCase__ = torch.remainder(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = remainders == 0
lowerCAmelCase__ = candidates[divisor_indices]
lowerCAmelCase__ = torch.max(UpperCamelCase_ )
return largest_divisor, torch.div(UpperCamelCase_ , UpperCamelCase_ , rounding_mode="floor" )
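# Reading of the two helpers above: the first mirrors torch.Tensor.unfold,
# slicing `dimension` into windows of `size` taken every `step` elements and
# moving the window axis to the end; the second returns the largest divisor of
# a sequence length that is smaller than the window size, together with the
# corresponding number of blocks (both as tensors, here presumably to keep
# GPT-Neo's local attention traceable for ONNX export).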
class lowercase__ ( _UpperCAmelCase ):
@property
def UpperCAmelCase ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
lowerCAmelCase__ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction="inputs" )
lowerCAmelCase__ = {0: "batch", 1: "past_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return self._config.num_heads
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , )-> Mapping[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase__ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCAmelCase__ = seqlen + 2
lowerCAmelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase__ = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
lowerCAmelCase__ = common_inputs["attention_mask"]
if self.use_past:
lowerCAmelCase__ = ordered_inputs["attention_mask"].dtype
lowerCAmelCase__ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return 13
| 340
|
from collections import defaultdict
from math import gcd
def solution( limit : int = 1_500_000 ) -> int:
    """Count the perimeters below `limit` formed by exactly one integer right triangle."""
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
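# Worked example for the generator above: (m, n) = (2, 1) is coprime with
# opposite parity, yielding the primitive triple (3, 4, 5) with perimeter
# 2 * m * (m + n) = 12; the inner loop then also counts its multiples
# 24, 36, ... up to the limit.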
if __name__ == "__main__":
print(F"{solution() = }")
| 340
| 1
|
from functools import reduce
_UpperCamelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution( n : str = _UpperCamelCase ) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
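# How the expression above works: for each window start i, reduce multiplies
# the thirteen digits n[i : i + 13] together (carrying the running product as
# a string), and max keeps the largest of the len(n) - 12 window products.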
if __name__ == "__main__":
print(F"""{solution() = }""")
| 352
|
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch :
    '''simple docstring'''
    def __init__(self ) -> None:
        """simple docstring"""
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self , input_image ) -> None:
        """Histogram-stretch the image at `input_image` and write the result."""
        self.img = cv2.imread(input_image , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                last = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
        self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg' , self.img )
    def plot_histogram(self ) -> None:
        """simple docstring"""
        plt.hist(self.img.ravel() , 256 , [0, 256] )
    def show_image(self ) -> None:
        """simple docstring"""
        cv2.imshow('Output-Image' , self.img )
        cv2.imshow('Input-Image' , self.original_image )
        cv2.waitKey(5000 )
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 335
| 0
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_ ( __A ):
__A : List[Any] = (UnCLIPScheduler,)
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> List[str]:
lowercase__ : int = {
"num_train_timesteps": 10_00,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**lowercase_ )
return config
def __UpperCamelCase ( self : Tuple ) -> Tuple:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase_ )
def __UpperCamelCase ( self : int ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def __UpperCamelCase ( self : str ) -> int:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase_ )
def __UpperCamelCase ( self : int ) -> Optional[int]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase_ )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase_ , prev_timestep=lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
lowercase__ : Dict = self.scheduler_classes[0]
lowercase__ : Optional[Any] = self.get_scheduler_config(variance_type="fixed_small_log" )
lowercase__ : Optional[int] = scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_99_49_87 ) ) < 1E-5
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase__ : Dict = self.scheduler_classes[0]
lowercase__ : Optional[Any] = self.get_scheduler_config(variance_type="learned_range" )
lowercase__ : Any = scheduler_class(**lowercase_ )
lowercase__ : Tuple = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase_ ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(4_87 , predicted_variance=lowercase_ ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(9_99 , predicted_variance=lowercase_ ) - -0.0_01_00_11 < 1E-5
def __UpperCamelCase ( self : str ) -> List[Any]:
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[Any] = scheduler_class(**lowercase_ )
lowercase__ : Union[str, Any] = scheduler.timesteps
lowercase__ : List[Any] = self.dummy_model()
lowercase__ : str = self.dummy_sample_deter
lowercase__ : int = torch.manual_seed(0 )
for i, t in enumerate(lowercase_ ):
# 1. predict noise residual
lowercase__ : Optional[int] = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
lowercase__ : int = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
lowercase__ : List[str] = pred_prev_sample
lowercase__ : List[str] = torch.sum(torch.abs(lowercase_ ) )
lowercase__ : List[Any] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
lowercase__ : Optional[Any] = self.scheduler_classes[0]
lowercase__ : Any = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(25 )
lowercase__ : Union[str, Any] = scheduler.timesteps
lowercase__ : Tuple = self.dummy_model()
lowercase__ : int = self.dummy_sample_deter
lowercase__ : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(lowercase_ ):
# 1. predict noise residual
lowercase__ : Tuple = model(lowercase_ , lowercase_ )
if i + 1 == timesteps.shape[0]:
lowercase__ : int = None
else:
lowercase__ : Union[str, Any] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase__ : str = scheduler.step(
lowercase_ , lowercase_ , lowercase_ , prev_timestep=lowercase_ , generator=lowercase_ ).prev_sample
lowercase__ : str = pred_prev_sample
lowercase__ : Dict = torch.sum(torch.abs(lowercase_ ) )
lowercase__ : List[Any] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def __UpperCamelCase ( self : Any ) -> Tuple:
pass
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
pass
| 87
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_snake_case = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if got_ver is None or want_ver is None:
raise ValueError(
F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
F" reinstalling {pkg}." )
if not ops[op](version.parse(_lowerCamelCase ) , version.parse(_lowerCamelCase ) ):
raise ImportError(
F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def A ( _lowerCamelCase , _lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = F"\n{hint}" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$" , _lowerCamelCase ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = requirement, None, None
else:
_lowerCAmelCase : Optional[int] = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , _lowerCamelCase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
F" got {requirement}" )
_lowerCAmelCase , _lowerCAmelCase : Dict = match[0]
_lowerCAmelCase : Any = want_full.split("," ) # there could be multiple requirements
_lowerCAmelCase : Optional[int] = {}
for w in want_range:
_lowerCAmelCase : Any = re.findall(r"^([\s!=<>]{1,2})(.+)" , _lowerCamelCase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
F" but got {requirement}" )
_lowerCAmelCase , _lowerCAmelCase : Tuple = match[0]
_lowerCAmelCase : Union[str, Any] = want_ver
if op not in ops:
raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}" )
# special case
if pkg == "python":
_lowerCAmelCase : Tuple = ".".join([str(_lowerCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return
# check if any version is installed
try:
_lowerCAmelCase : Any = importlib.metadata.version(_lowerCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"The '{requirement}' distribution was not found and is required by this application. {hint}" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(_lowerCamelCase , _lowerCamelCase )
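# Hedged usage sketch, assuming the second helper above corresponds to
# transformers' `require_version`: a typical pinned-dependency check is
#
#     require_version("tokenizers>=0.11.1,!=0.11.3,<0.14")
#
# which splits the comma-separated clauses and compares each one against the
# installed version through the `ops` table.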
| 36
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowercase : Optional[int] = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 366
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(__lowercase)
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def __init__( self :Any , **a :Union[str, Any] ) -> Union[str, Any]:
super().__init__(**a )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self :Any , a :Union[str, List[str], "Image", List["Image"]] , **a :Tuple ) -> List[str]:
return super().__call__(a , **a )
def _lowerCamelCase ( self :List[Any] , **a :List[str] ) -> List[Any]:
__UpperCamelCase : List[Any] = {}
if "candidate_labels" in kwargs:
__UpperCamelCase : Optional[int] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
__UpperCamelCase : List[str] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def _lowerCamelCase ( self :List[str] , a :Optional[int] , a :List[str]=None , a :Dict="This is a photo of {}." ) -> Any:
__UpperCamelCase : Dict = load_image(a )
__UpperCamelCase : Any = self.image_processor(images=[image] , return_tensors=self.framework )
__UpperCamelCase : str = candidate_labels
        __UpperCamelCase : List[Any] = [hypothesis_template.format(x ) for x in candidate_labels]
__UpperCamelCase : List[Any] = self.tokenizer(a , return_tensors=self.framework , padding=a )
__UpperCamelCase : Any = [text_inputs]
return inputs
def _lowerCamelCase ( self :Union[str, Any] , a :Optional[Any] ) -> List[Any]:
__UpperCamelCase : List[str] = model_inputs.pop("candidate_labels" )
__UpperCamelCase : Dict = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , a ):
__UpperCamelCase : Optional[Any] = text_inputs[0]
else:
# Batching case.
__UpperCamelCase : int = text_inputs[0][0]
__UpperCamelCase : str = self.model(**a , **a )
__UpperCamelCase : List[Any] = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def _lowerCamelCase ( self :List[Any] , a :List[Any] ) -> Tuple:
__UpperCamelCase : Any = model_outputs.pop("candidate_labels" )
__UpperCamelCase : Optional[Any] = model_outputs["logits"][0]
if self.framework == "pt":
__UpperCamelCase : int = logits.softmax(dim=-1 ).squeeze(-1 )
__UpperCamelCase : List[str] = probs.tolist()
if not isinstance(a , a ):
__UpperCamelCase : List[Any] = [scores]
elif self.framework == "tf":
__UpperCamelCase : Optional[int] = stable_softmax(a , axis=-1 )
__UpperCamelCase : Dict = probs.numpy().tolist()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
__UpperCamelCase : Tuple = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
]
return result
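# Hedged usage sketch of the pipeline above (scores are illustrative only):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification")
#     classifier("cat.png", candidate_labels=["cat", "dog"])
#     # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]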
| 151
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ["""pixel_values"""]
def __init__( self , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = PILImageResampling.BILINEAR , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = 1 / 255 , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = None , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
__A : int = size if size is not None else {'''shortest_edge''': 256}
__A : List[Any] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
__A : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__A : Tuple = get_size_dict(__lowerCamelCase , param_name='''crop_size''' )
__A : Optional[Any] = do_resize
__A : Union[str, Any] = size
__A : str = resample
__A : Optional[Any] = do_center_crop
__A : int = crop_size
__A : Optional[Any] = do_rescale
__A : Union[str, Any] = rescale_factor
__A : Tuple = do_normalize
__A : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
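# Usage sketch (added for illustration; not part of the original file). Assumes
# Pillow is installed and "example.jpg" is a placeholder path; `__snake_case`
# is simply the (obfuscated) name the processor class is defined under above.
if __name__ == "__main__":
    from PIL import Image

    image_processor = __snake_case()  # defaults: shortest-edge-256 resize, 224x224 crop
    image = Image.open("example.jpg")  # placeholder input image
    batch = image_processor.preprocess(image, return_tensors="pt")
    print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])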
| 179
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __snake_case ( PipelineTool ):
"""simple docstring"""
    default_checkpoint = """microsoft/speecht5_tts"""
    description = (
        """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
        """text to read (in English) and returns a waveform object containing the sound."""
    )
    name = """text_reader"""
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["""text"""]
    outputs = ["""audio"""]
    def setup( self ):
        '''simple docstring'''
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        '''simple docstring'''
        inputs = self.pre_processor(text=text , return_tensors='''pt''' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        '''simple docstring'''
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        '''simple docstring'''
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
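# Usage sketch (added for illustration; not part of the original file). Assumes
# the SpeechT5 checkpoints can be downloaded and that `soundfile` is installed
# for writing the waveform; `__snake_case` is the (obfuscated) name of the
# tool class defined above.
if __name__ == "__main__":
    import soundfile as sf

    tool = __snake_case()
    tool.setup()
    speech = tool.decode(tool.forward(tool.encode("Hello, world!")))
    sf.write("speech.wav", speech.numpy(), samplerate=16000)  # SpeechT5 produces 16 kHz audio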
| 179
| 1
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase):
'''simple docstring'''
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def _lowerCamelCase ( self :Union[str, Any] ) -> Dict:
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _lowerCamelCase ( self :Dict , a :List[Any] ) -> int:
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def _lowerCamelCase ( self :Dict , a :int ) -> Tuple:
        input_text, output_text = self.get_input_output_texts(a )
        ids = a.encode(output_text , add_special_tokens=False )
        text = a.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def _lowerCamelCase ( self :str ) -> List[str]:
pass # TODO add if relevant
def _lowerCamelCase ( self :List[str] ) -> Any:
pass # TODO add if relevant
def _lowerCamelCase ( self :Optional[Any] ) -> List[str]:
pass # TODO add if relevant
def _lowerCamelCase ( self :Optional[int] ) -> Union[str, Any]:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def _lowerCamelCase ( self :Tuple ) -> Tuple:
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
        self.assertIsNotNone(tokenizer )
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        filename = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(filename , "wb" ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , "rb" ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
def _lowerCamelCase ( self :Dict ) -> List[str]:
        tokenizer = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _lowerCamelCase ( self :Union[str, Any] ) -> Optional[Any]:
try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _lowerCamelCase ( self :Union[str, Any] ) -> Union[str, Any]:
try:
            tokenizer = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _lowerCamelCase ( self :List[Any] ) -> Dict:
        tokenizer = MecabTokenizer(do_lower_case=True , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _lowerCamelCase ( self :str ) -> int:
try:
            tokenizer = MecabTokenizer(
                do_lower_case=True , normalize_text=False , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def _lowerCamelCase ( self :List[str] ) -> Tuple:
        tokenizer = MecabTokenizer(normalize_text=False , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def _lowerCamelCase ( self :int ) -> Dict:
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
        self.assertIsNotNone(tokenizer )
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        filename = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(filename , "wb" ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , "rb" ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_sudachi
def _lowerCamelCase ( self :str ) -> List[str]:
        tokenizer = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _lowerCamelCase ( self :List[Any] ) -> Optional[Any]:
        tokenizer = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def _lowerCamelCase ( self :Any ) -> Dict:
        tokenizer = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def _lowerCamelCase ( self :List[Any] ) -> Optional[int]:
        tokenizer = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def _lowerCamelCase ( self :Optional[int] ) -> str:
        tokenizer = SudachiTokenizer(do_lower_case=True , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _lowerCamelCase ( self :List[Any] ) -> List[Any]:
        tokenizer = SudachiTokenizer(normalize_text=False , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _lowerCamelCase ( self :Tuple ) -> Optional[int]:
        tokenizer = SudachiTokenizer(trim_whitespace=True , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _lowerCamelCase ( self :Any ) -> Optional[int]:
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
        self.assertIsNotNone(tokenizer )
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        filename = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(filename , "wb" ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , "rb" ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_jumanpp
def _lowerCamelCase ( self :Union[str, Any] ) -> int:
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _lowerCamelCase ( self :Any ) -> Optional[Any]:
        tokenizer = JumanppTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _lowerCamelCase ( self :Optional[Any] ) -> int:
        tokenizer = JumanppTokenizer(normalize_text=False )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _lowerCamelCase ( self :Union[str, Any] ) -> Optional[int]:
        tokenizer = JumanppTokenizer(trim_whitespace=True )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _lowerCamelCase ( self :Union[str, Any] ) -> str:
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def _lowerCamelCase ( self :Dict ) -> Any:
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def _lowerCamelCase ( self :Union[str, Any] ) -> int:
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
        self.assertListEqual(tokens , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
        self.assertListEqual(tokens , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def _lowerCamelCase ( self :int ) -> Union[str, Any]:
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
        text = tokenizer.encode("ありがとう。" , add_special_tokens=False )
        text_a = tokenizer.encode("どういたしまして。" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase):
'''simple docstring'''
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def _lowerCamelCase ( self :Dict ) -> Any:
super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _lowerCamelCase ( self :List[str] , **a :Union[str, Any] ) -> Any:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **a )
def _lowerCamelCase ( self :List[str] , a :Optional[Any] ) -> Any:
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def _lowerCamelCase ( self :List[str] ) -> Union[str, Any]:
pass # TODO add if relevant
def _lowerCamelCase ( self :Dict ) -> Any:
pass # TODO add if relevant
def _lowerCamelCase ( self :int ) -> Any:
pass # TODO add if relevant
def _lowerCamelCase ( self :Optional[Any] ) -> Tuple:
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
        self.assertListEqual(
            tokens , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def _lowerCamelCase ( self :Dict ) -> Union[str, Any]:
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def _lowerCamelCase ( self :Any ) -> Dict:
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
        text = tokenizer.encode("ありがとう。" , add_special_tokens=False )
        text_a = tokenizer.encode("どういたしまして。" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :str ) -> List[Any]:
        tokenizer_name = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :Optional[Any] ) -> Optional[int]:
        tokenizer_name = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertTokenizer.from_pretrained(tokenizer_name )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
        tokenizer_name = "bert-base-cased"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertJapaneseTokenizer.from_pretrained(tokenizer_name )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 367
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase):
'''simple docstring'''
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _lowerCamelCase ( self :int ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self :Optional[int] , a :List[str] ) -> List[str]:
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
return input_text, output_text
def _lowerCamelCase ( self :str ) -> Any:
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
        self.assertEqual(len(vocab_keys ) , 3_0_0_0_1 )
def _lowerCamelCase ( self :Union[str, Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
    def _lowerCamelCase ( self :List[Any] ) -> str:
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
    def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
        pass
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
    def _lowerCamelCase ( self :str ) -> Any:
        pass
    def _lowerCamelCase ( self :Tuple ) -> Dict:
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def _lowerCamelCase ( self :List[Any] ) -> str:
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def _lowerCamelCase ( self :Dict ) -> Any:
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def _lowerCamelCase ( self :List[str] ) -> Tuple:
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def _lowerCamelCase ( self :int ) -> Any:
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def _lowerCamelCase ( self :List[Any] ) -> List[str]:
        sequence = "This is a test"
        ids_target = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , keep_accents=True )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = rust_tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = rust_tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
def _lowerCamelCase ( self :Union[str, Any] ) -> str:
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("sequence builders" )
        text_a = tokenizer.encode("multi-sequence build" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def _lowerCamelCase ( self :Dict ) -> int:
# fmt: off
__UpperCamelCase : Dict = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
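# Usage sketch (added for illustration; not part of the original file). Loads
# the same public checkpoint the integration test above targets; network
# access is required on first use.
if __name__ == "__main__":
    tokenizer = DebertaVaTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    ids = tokenizer.encode("I was born in 92000, and this is falsé.")
    print(tokenizer.convert_ids_to_tokens(ids))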
| 151
| 0
|
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "weather", params=locals()).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "forecast", params=locals()).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
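# Usage sketch (added for illustration; not part of the original file).
# OpenWeatherMap reports failures in the JSON body (an HTTP-style "cod" field
# plus a "message") rather than raising, so callers can guard on it before
# reading the payload; temperatures default to Kelvin.
def print_temperature(location: str = "Chicago") -> None:
    data = current_weather(location)
    if str(data.get("cod")) != "200":
        print(f"Request failed: {data.get('message', 'unknown error')}")
    else:
        print(f"{location}: {data['main']['temp']} K")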
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 227
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """simple docstring"""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
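# Evaluation sketch (added for illustration; not part of the original file).
# Complements the confusion matrix above with a plain accuracy number on the
# held-out split, reusing the helpers defined in this file; the random_state
# is arbitrary.
def test_accuracy() -> float:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25, random_state=42)
    classifier = xgboost(x_train, y_train)
    # fraction of exact matches between predictions and ground truth
    return float(np.mean(classifier.predict(x_test) == y_test))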
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 227
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
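# Usage sketch (added for illustration; not part of the original file). With
# the _LazyModule wiring above, heavy submodules are imported only on first
# attribute access, so a torch-only environment never pays for the TF classes:
#
#     from transformers import XLMConfig   # cheap: configuration module only
#     from transformers import XLMModel    # first access triggers the modeling_xlm import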
| 365
|
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ) -> Optional[int]:
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 47
| 0
|
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    """Timeout""",
    """BaseFileLock""",
    """WindowsFileLock""",
    """UnixFileLock""",
    """SoftFileLock""",
    """FileLock""",
]
__version__ = """3.0.12"""
_logger = None
def logger() -> logging.Logger:
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout(TimeoutError ):
    """simple docstring"""
    def __init__( self , lock_file ):
        '''simple docstring'''
        self.lock_file = lock_file
        return None
    def __str__( self ):
        '''simple docstring'''
        temp = F'The file lock \'{self.lock_file}\' could not be acquired.'
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""
    def __init__( self , lock ):
        '''simple docstring'''
        self.lock = lock
        return None
    def __enter__( self ):
        '''simple docstring'''
        return self.lock
    def __exit__( self , exc_type , exc_value , traceback ):
        '''simple docstring'''
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self ):
        '''simple docstring'''
        return self._lock_file
    @property
    def timeout( self ):
        '''simple docstring'''
        return self._timeout
    @timeout.setter
    def timeout( self , timeout ):
        '''simple docstring'''
        self._timeout = float(timeout )
        return None
    def _acquire( self ):
        '''simple docstring'''
        raise NotImplementedError()
    def _release( self ):
        '''simple docstring'''
        raise NotImplementedError()
    @property
    def is_locked( self ):
        '''simple docstring'''
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.05 ):
        '''simple docstring'''
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F'Attempting to acquire lock {lock_id} on {lock_filename}' )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F'Lock {lock_id} acquired on {lock_filename}' )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F'Timeout on acquiring lock {lock_id} on {lock_filename}' )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release( self , force=False ):
        '''simple docstring'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F'Attempting to release lock {lock_id} on {lock_filename}' )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F'Lock {lock_id} released on {lock_filename}' )
        return None
    def __enter__( self ):
        '''simple docstring'''
        self.acquire()
        return self
    def __exit__( self , exc_type , exc_value , traceback ):
        '''simple docstring'''
        self.release()
        return None
    def __del__( self ):
        '''simple docstring'''
        self.release(force=True )
        return None
    def hash_filename_if_too_long( self , path , max_length ) -> str:
        '''simple docstring'''
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock(BaseFileLock ):
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
    def _acquire( self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release( self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock ):
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire( self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock(BaseFileLock ):
    """simple docstring"""
    def _acquire( self ):
        '''simple docstring'''
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        '''simple docstring'''
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("""only soft file lock is available""")
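# Usage sketch (added for illustration; not part of the original file).
# FileLock resolves above to the strongest mechanism available on the current
# platform; the context manager acquires on entry and releases on exit, and a
# nested acquire from the same process only bumps the internal counter.
if __name__ == "__main__":
    lock = FileLock("demo.txt.lock", timeout=5)
    with lock:
        print("holding lock:", lock.is_locked)
    print("released:", not lock.is_locked)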
| 68
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    # The first two positional arguments are the module-level prediction/target lists defined above.
    no_aggregation = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
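# A minimal usage sketch (not part of the original test file), assuming the two
# module-level prediction/target lists above; the printed values are illustrative:
#
#     scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge2", "rougeLsum"], newline_sep=True)
#     print(scores)  # e.g. {"rouge2": 21.3, "rougeLsum": 29.4} -- aggregated F-measures
#
# newline_sep=True re-inserts "\n" between sentences before scoring, which only
# affects rougeLsum (computed line by line), exactly what the tests above verify.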
| 335
| 0
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_lowercase: Optional[Any] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_lowercase: Union[str, Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_lowercase: Union[str, Any] = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_lowercase: List[Any] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_lowercase: List[Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()  # keeps track of the number of completions per task_id
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
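# A quick numeric sanity check (not part of the original metric) of the estimator
# above against the closed form pass@k = 1 - C(n - c, k) / C(n, k):
# with n = 5 samples of which c = 2 pass, pass@2 = 1 - C(3, 2) / C(5, 2) = 0.7.
def _check_pass_at_k():
    from math import comb

    n, c, k = 5, 2, 2
    expected = 1.0 - comb(n - c, k) / comb(n, k)  # 0.7
    estimated = float(estimate_pass_at_k(n, [c], k)[0])
    assert abs(estimated - expected) < 1e-12

# _check_pass_at_k()  # uncomment to run the check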
| 362
| 0
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase : Any =logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ):
        """
        Every array in the list is normalized to have zero mean and unit variance.
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray):
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self):
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
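# A short usage sketch (not part of the original module); the 1-second silent
# waveform is a stand-in for real 16 kHz audio, and the shapes shown are what
# the code above should produce under the default configuration:
#
#     import numpy as np
#     extractor = SpeechT5FeatureExtractor()
#     waveform = np.zeros(16000, dtype=np.float32)
#     inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#     inputs["input_values"].shape    # (1, 16000): raw waveform for the encoder
#     targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
#     targets["input_values"].shape   # (1, num_frames, 80): log-mel decoder targets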
| 189
|
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
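# A minimal end-to-end sketch (not part of this file) of how the pipeline above
# is typically driven; the checkpoint name is illustrative only:
#
#     from diffusers import RePaintPipeline
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     out = pipe(image=original_image, mask_image=mask, num_inference_steps=250,
#                jump_length=10, jump_n_sample=10, output_type="pil")
#     out.images[0].save("inpainted.png")
#
# The mask follows the convention set by _preprocess_mask: pixels at 1 are kept
# from the known region, pixels at 0 are re-synthesized by the model.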
| 151
| 0
|
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # controlled phase rotation of pi / 2**(counter - j)
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
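# A quick interpretation aid (not in the original file): applied to the all-zero
# input state, the QFT produces an equal superposition, so the 10000-shot
# histogram returned above should be roughly uniform over all 2**n basis states:
#
#     counts = quantum_fourier_transform(3)
#     assert len(counts) == 2 ** 3          # all 8 outcomes observed
#     assert sum(counts.values()) == 10000  # one entry per shot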
| 352
|
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")

    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
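# A classical sanity check (not part of the original file) of the sifting step:
# a raw measurement bit only survives when Alice's and Bob's randomly chosen
# bases coincide, which happens with probability 1/2 per qubit.
def _sift(alice_basis, bob_basis, measured_bits):
    # Keep measured bits only at positions where the two bases agree.
    return "".join(bit for a, b, bit in zip(alice_basis, bob_basis, measured_bits) if a == b)


assert _sift([0, 1, 1, 0], [0, 0, 1, 1], "1011") == "11"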
| 229
| 0
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_snake_case = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dual_guided_save_load(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 250
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
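# A compact illustration (not part of the original test file) of why the padding
# side matters for decoder-only generation, as the batch test above asserts:
#
#     tokenizer.padding_side = "left"      # pad tokens go before the prompt
#     batch = tokenizer(["a long prompt", "hi"], return_tensors="tf", padding=True)
#     model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
#
# With right padding, the model would have to continue from pad tokens, so the
# short sequence's batched continuation would diverge from its unbatched one.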
| 151
| 0
|
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
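# A small usage sketch (not part of the original helper module) tying the
# primitives together: print a crude two-line status display, then move the
# cursor back up and redraw the first line in place.
def _demo_redraw():
    forceWrite("building...", "\n")
    forceWrite("tests pending", "\n")
    move_cursor(2, Direction.UP.name)  # jump two lines up
    clear_line()                       # blank the old "building..." line
    forceWrite("build done", "\n")

# _demo_redraw()  # only meaningful on a real ANSI terminal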
| 147
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there is an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
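# A brief usage sketch (not part of the original module); the checkpoint name is
# the public LayoutLMv3 base model, and `document` stands for any PIL image:
#
#     from transformers import LayoutLMv3Processor
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     encoding = processor(document, return_tensors="pt")  # OCR runs inside the image processor
#     # encoding now holds input_ids, bbox, attention_mask and pixel_values
#
# When apply_ocr is disabled on the image processor, words and boxes must be
# supplied explicitly instead: processor(document, words, boxes=boxes, ...).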
| 147
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
a__ : List[Any] = None
a__ : Dict = logging.get_logger(__name__)
a__ : Any = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
a__ : str = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
a__ : Any = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
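# A small illustration (not part of the original file) of the special-token
# layout produced by build_inputs_with_special_tokens above, using dummy ids:
#
#     tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     tok.build_inputs_with_special_tokens([10, 11])        # [<s>, 10, 11, </s>]
#     tok.build_inputs_with_special_tokens([10, 11], [12])  # [<s>, 10, 11, </s>, </s>, 12, </s>]
#
# This is the BARThez / CamemBERT-style pair encoding with a doubled separator.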
| 80
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name: str) -> None:
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2), )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name: str) -> str:
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        'How do you want to authorize?', ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '], int, )
    aws_profile = None
    aws_region = None  # stays None when an AWS profile is used instead of raw credentials
    if credentials_configuration == 0:
        aws_profile = _ask_field('Enter your AWS Profile name: [default] ', default='default')
        os.environ['AWS_PROFILE'] = aws_profile
    else:
        print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,'
            '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`')
        aws_access_key_id = _ask_field('AWS Access Key ID: ')
        os.environ['AWS_ACCESS_KEY_ID'] = aws_access_key_id
        aws_secret_access_key = _ask_field('AWS Secret Access Key: ')
        os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key
        aws_region = _ask_field('Enter your AWS Region: [us-east-1]', default='us-east-1')
        os.environ['AWS_DEFAULT_REGION'] = aws_region
    role_management = _ask_options(
        'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?', ['Provide IAM Role name', 'Create new IAM role using credentials'], int, )
    if role_management == 0:
        iam_role_name = _ask_field('Enter your IAM role name: ')
    else:
        iam_role_name = 'accelerate_sagemaker_execution_role'
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        'Do you want to use custom Docker image? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field('Enter your Docker image: ', lambda x: str(x).lower())
    is_sagemaker_inputs_enabled = _ask_field(
        'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ', lambda x: str(x).lower(), )
    is_sagemaker_metrics_enabled = _ask_field(
        'Do you want to enable SageMaker metrics? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ', lambda x: str(x).lower(), )
    distributed_type = _ask_options(
        'What is the distributed mode?', ['No distributed training', 'Data parallelism'], _convert_sagemaker_distributed_mode, )
    dynamo_config = {}
    use_dynamo = _ask_field(
        'Do you wish to optimize your script with torch dynamo?[yes/NO]:', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    if use_dynamo:
        prefix = 'dynamo_'
        dynamo_config[prefix + 'backend'] = _ask_options(
            'Which dynamo backend would you like to use?', [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
        use_custom_options = _ask_field(
            'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
        if use_custom_options:
            dynamo_config[prefix + 'mode'] = _ask_options(
                'Which mode do you want to use?', TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default='default', )
            dynamo_config[prefix + 'use_fullgraph'] = _ask_field(
                'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
            dynamo_config[prefix + 'use_dynamic'] = _ask_field(
                'Do you want to enable dynamic shape tracing? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    ec2_instance_query = 'Which EC2 instance type you want to use for your training?'
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default='ml.p3.2xlarge')
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            'How many machines do you want to use? [1]: ', int, default=1, )
    mixed_precision = _ask_options(
        'Do you wish to use FP16 or BF16 (mixed precision)?', ['no', 'fp16', 'bf16', 'fp8'], _convert_mixed_precision, )
    if use_dynamo and mixed_precision == "no":
        print(
            'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.')
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, )
| 47
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        gelu10 = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        # gelu_10 clips activations at 10.0; on the unclipped region the two must agree
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        self.assertEqual(act1.a, 1)
        # instances returned by get_activation must not share state
        with self.assertRaises(AttributeError):
            _ = act2.a
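
# Quick illustration of what test_gelu_10 verifies (hedged sketch, requires torch):
#   import torch
#   from transformers.activations import get_activation
#   get_activation("gelu")(torch.tensor([100.0]))     # ~tensor([100.]), unbounded
#   get_activation("gelu_10")(torch.tensor([100.0]))  # tensor([10.]), clipped at 10.0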
| 45
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of a number, rounded to digit_amount digits when digit_amount > 0."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
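
# For example, decimal_isolate(35.345, 1) returns round(0.345..., 1) == 0.3; with a
# digit_amount of 0 the raw (unrounded) fractional part is returned, sign included.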
| 45
| 1
|
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
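# Each entry maps a key fragment from the OpenAI checkpoint to its Transformers
# equivalent; rename_keys below applies them as plain substring replacements over
# every key in the state dict.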
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url, root=os.path.join(os.path.expanduser("~"), ".cache", "whisper")):
    # the default cache directory is an assumption so that the one-argument call below works
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"], )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
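
# Hedged usage sketch (the script file name is assumed, not given in this source):
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny
# A bare model name is resolved through _MODELS and downloaded; a local *.pt path is loaded directly.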
| 236
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular per-candidate results."""
        if os.getenv('HF_ALLOW_CODE_EVAL', 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.')
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f'pass@{k}': estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
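
# Worked example for the unbiased estimator above: with n = 2 samples of which c = 1 is
# correct, pass@1 = 1 - C(n-c, 1)/C(n, 1) = 1 - 1/2 = 0.5, matching the docstring example.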
| 71
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 5_1_2,
'albert-large-v1': 5_1_2,
'albert-xlarge-v1': 5_1_2,
'albert-xxlarge-v1': 5_1_2,
'albert-base-v2': 5_1_2,
'albert-large-v2': 5_1_2,
'albert-xlarge-v2': 5_1_2,
'albert-xxlarge-v2': 5_1_2,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace('\'\'', '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
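
# Illustrative round trip (hedged; assumes the checkpoints above are downloadable):
#   from transformers import AlbertTokenizer
#   tok = AlbertTokenizer.from_pretrained("albert-base-v2")
#   pieces = tok.tokenize("Hello world!")   # sentencepiece pieces starting with "▁",
#                                           # lowercased since do_lower_case defaults to True
#   tok.convert_tokens_to_string(pieces)    # recovers the (lowercased) input text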
| 354
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 94
| 0
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
return
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("Swin does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
pass
@unittest.skip("Swin does not support feedforward chunking" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def SCREAMING_SNAKE_CASE_( self ) -> int:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCamelCase_ = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(lowercase , lowercase ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCamelCase_ = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCamelCase_ = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowercase ):
lowerCamelCase_ = 0
return t
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
with torch.no_grad():
lowerCamelCase_ = model(**lowercase , return_dict=lowercase , **lowercase )
lowerCamelCase_ = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1e-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
f' {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has'
f' `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}.'
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase )
lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase )
lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"output_hidden_states": True} )
lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 19
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits the text into sentences, one per line; also strips the Pegasus newline token."""
    x = re.sub("""<n>""", """""", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 229
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """wavlm"""
    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800,
        do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10,
        mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320,
        num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3,
        adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs,
    ) -> None:
        # Parameter names recovered from the attribute assignments below.
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
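# NOTE (illustrative sketch, not part of the original file): the property above
# relies on `functools` and `operator` being imported at the (unseen) top of the
# module; they are imported here too so the check below is self-contained. The
# feature encoder downsamples the waveform by the product of the conv strides,
# so with the default conv_stride=(5, 2, 2, 2, 2, 2, 2) one logit frame covers
# 320 input samples (20 ms of audio at 16 kHz).
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320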
| 207
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
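    # Example invocation (a sketch; all paths are placeholders, not files shipped
    # with this script):
    #
    #   python <this_script>.py \
    #       --task WTQ \
    #       --reset_position_index_per_cell \
    #       --tf_checkpoint_path /path/to/model.ckpt-0 \
    #       --tapas_config_file /path/to/tapas_config.json \
    #       --pytorch_dump_path /path/to/output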
| 207
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 147
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # NOTE: the model-specific class name was lost in obfuscation; the base class,
    # parameter names and attribute names are recovered from the method bodies.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample=None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
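# Minimal usage sketch (the `ImageProcessor` name above is a stand-in, see the
# class note): the preprocess chain is resize(256x256, bicubic) ->
# center_crop(224x224) -> rescale(1/255) -> normalize((x - mean) / std) ->
# channels-first, i.e. torchvision-style ImageNet preprocessing.
#
#     import numpy as np
#     processor = ImageProcessor()
#     dummy = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
#     batch = processor.preprocess(dummy, return_tensors="np")
#     assert batch["pixel_values"].shape == (1, 3, 224, 224)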
| 147
| 1
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase : int = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase,  # the literal in the fmt:off block above keeps its source-level name
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
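# The two assertions above capture the multilingual contract (a behavioural
# sketch, assuming a multilingual checkpoint): setting `tokenizer.tgt_lang`
# swaps the forced language-code prefix, so encoded sequences have the shape
# [lang_code, ...text ids..., eos]:
#
#     tokenizer.tgt_lang = "fr"
#     ids = tokenizer("C'est trop cool").input_ids  # ids[0] == FR_CODE, ids[-1] == eos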
| 369
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
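# For reference, the same composition pattern outside the tests (a sketch; the
# ids and cur_len are placeholders): FlaxLogitsProcessorList just applies each
# processor in order to (input_ids, scores) at the current generation step.
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50), FlaxTopPLogitsWarper(0.9)]
#     )
#     scores = processors(input_ids, scores, cur_len=cur_len)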
| 103
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["PerceiverFeatureExtractor"]
lowercase_ = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
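# How the lazy-module pattern above behaves (a sketch): importing the package
# only builds `_import_structure` and installs the `_LazyModule` shim; the heavy
# submodules (and their torch/vision dependencies) load on first attribute
# access rather than at package-import time. For example:
#
#     import importlib
#     perceiver = importlib.import_module("transformers.models.perceiver")
#     model_cls = perceiver.PerceiverModel  # modeling_perceiver is imported here, lazily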
| 45
|
"""simple docstring"""
lowercase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowercase ( lowerCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(lowerCAmelCase__ )
__a = ''''''.join(bin(lowerCAmelCase__ )[2:].zfill(8 ) for byte in data )
__a = len(lowerCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__a = b'''=''' * ((6 - len(lowerCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCAmelCase__ ) % 6)
else:
__a = b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def lowercase ( lowerCAmelCase__ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = (
'''argument should be a bytes-like object or ASCII string, '''
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(lowerCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
try:
__a = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
__a = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowerCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__a = encoded_data[:-padding]
__a = ''''''.join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__a = ''''''.join(
bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
__a = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowerCAmelCase__ ) , 8 )
]
return bytes(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
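# Round-trip usage sketch for the two functions above (the expected values can
# be cross-checked against the standard library, which implements the same
# RFC 4648 alphabet):
#
#     import base64
#     payload = b"Hello World!"
#     assert base64_encode(payload) == base64.b64encode(payload)
#     assert base64_decode(base64_encode(payload)) == payload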
| 45
| 1
|
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Create the vector that points from ``end_point1`` to ``end_point2``."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Compute the cross product of two 3D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check if a vector is the zero vector, up to ``accuracy`` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(point_a: Point3d, point_b: Point3d, point_c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is the zero vector."""
    ab = create_vector(point_a, point_b)
    ac = create_vector(point_a, point_c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
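# Worked example (sketch): (0,0,0), (1,1,1) and (2,2,2) lie on one line, so
# AB x AC = (0,0,0); nudging the third point off the line breaks collinearity.
#
#     assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
#     assert not are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3))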
| 361
|
def catalan_number(number: int) -> int:
    """
    Compute the ``number``-th Catalan number (1-indexed) iteratively, via the
    recurrence C(n) = C(n-1) * 2(2n-1) / (n+1). The original function name was
    lost in obfuscation; ``catalan_number`` is a descriptive reconstruction.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
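# Quick check of the recurrence (sketch): successive calls walk the Catalan
# sequence 1, 1, 2, 5, 14, ... (1-indexed, so catalan_number(3) == 2).
#
#     assert [catalan_number(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]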
| 267
| 0
|
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> Optional[Any]:
while b:
__UpperCamelCase = b, a % b
return a
def A ( snake_case :int , snake_case :int ) -> Optional[int]:
return a if b == 0 else euclidean_gcd_recursive(UpperCAmelCase_ , a % b )
def A ( ) -> Optional[int]:
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 316
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-character signature, so that anagrams share the same key."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list that is an anagram of ``my_word``."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
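# Example (sketch, assuming a standard English word list next to this script):
# anagram("pots") would return entries such as ["opts", "post", "pots", "spot",
# "stop", "tops"], since all of them share the signature "opst".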
| 94
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 147
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
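# Typical usage sketch (the checkpoint name is an assumption — any zero-shot
# object detection checkpoint, e.g. an OWL-ViT one, should work):
#
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector("path/to/image.jpg", candidate_labels=["cat", "remote control"])
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]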
| 147
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 207
|
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
    '`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 207
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : str=3 , UpperCamelCase : Any=18 , UpperCamelCase : Optional[Any]=30 , UpperCamelCase : str=400 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=True , UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
__UpperCAmelCase : Any = size if size is not None else {"""height""": 18, """width""": 18}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : List[Any] = min_resolution
__UpperCAmelCase : Dict = max_resolution
__UpperCAmelCase : Tuple = do_resize
__UpperCAmelCase : str = size
__UpperCAmelCase : List[Any] = do_normalize
__UpperCAmelCase : Optional[int] = image_mean
__UpperCAmelCase : int = image_std
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = DPTImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = DPTImageProcessingTester(self )
@property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
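# Hedged usage sketch of the behaviour the tests above assert. ViTImageProcessor
# is assumed only as a concrete example; any processor with a {"height", "width"}
# size dict resizes the same way.
def _illustrate_image_processor_shapes():
    import numpy as np
    from PIL import Image
    from transformers import ViTImageProcessor

    processor = ViTImageProcessor(size={"height": 18, "width": 18} )
    image = Image.fromarray(np.zeros((32, 32, 3) , dtype=np.uint8 ) )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # (batch, num_channels, height, width), matching the assertions above
    assert pixel_values.shape == (1, 3, 18, 18)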
| 320
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , block_sizes=[1, 1, 2] , num_decoder_layers=1 , d_model=32 , n_head=4 , d_head=8 , d_inner=37 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , max_position_embeddings=512 , type_vocab_size=3 , initializer_std=0.02 , num_labels=3 , num_choices=4 , scope=None , base=False , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    # Hedged arithmetic note: with the defaults block_sizes=[1, 1, 2] and
    # num_decoder_layers=1 the full model reports 1 + 1 + 2 + 1 = 5 hidden
    # layers, hence 7 expected hidden states once the two extra states above
    # are counted.
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelBaseModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    # Hedged shape note for the checks above: with seq_length=7 and two stride-2
    # poolings, separate_cls=True plus truncate_seq=True shrink 7 tokens to 4 and
    # then to 2; disabling truncate_seq leaves 3 tokens, which is why the
    # expected middle dimension changes from 2 to 3 and back.
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelForPreTraining(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelForQuestionAnswering(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFFunnelModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFFunnelModelTester(self , base=True )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_base_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
| 320
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = 'xlm-roberta-xl'
    def __init__( self , vocab_size=25_08_80 , hidden_size=25_60 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=1_02_40 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_14 , type_vocab_size=1 , initializer_range=0.0_2 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
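# Hedged usage sketch: reading the dynamic ONNX axes off the config above.
# The OnnxConfig(config, task=...) constructor signature is assumed here.
def _illustrate_onnx_axes():
    onnx_config = XLMRobertaXLOnnxConfig(XLMRobertaXLConfig() , task="multiple-choice" )
    # {'input_ids': {0: 'batch', 1: 'choice', 2: 'sequence'}, 'attention_mask': ...}
    print(dict(onnx_config.inputs ) )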
| 185
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''' , do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def batch_encode_candidates( self , text , **kwargs):
        kwargs['''padding'''] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('''text_pair''' , None)
        return_tensors = kwargs.pop('''return_tensors''' , None)
        output_data = {
            '''input_ids''': [],
            '''attention_mask''': [],
            '''token_type_ids''': [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs)
            encoded_input_ids = encoded_candidates.get('''input_ids''')
            encoded_attention_mask = encoded_candidates.get('''attention_mask''')
            encoded_token_type_ids = encoded_candidates.get('''token_type_ids''')
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
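# Hedged usage sketch for `batch_encode_candidates` above: every candidate row
# is padded to `max_length`, yielding (num_examples, num_candidates, max_length).
# Loading the checkpoint named in the maps above requires network access.
def _illustrate_batch_encode_candidates():
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder" )
    candidates = [["Hello world!", "Nice to meet you!"]]
    batch = tokenizer.batch_encode_candidates(candidates , max_length=10 , return_tensors="np" )
    print(batch.input_ids.shape )  # (1, 2, 10)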
| 103
| 0
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix ) -> Matrix | None:
    '''simple docstring'''
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix ) -> None:
    '''simple docstring'''
    for row in grid:
        for cell in row:
            print(cell , end=" " )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
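# Hedged complexity note: the backtracking solver above tries at most 9 digits
# per empty cell, so the worst case is O(9^m) for m empty cells, while each
# `is_safe` check stays constant-time because rows, columns and 3x3 boxes are
# fixed in size.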
| 29
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = 'segformer'
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 1_60, 2_56] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=2_56 , semantic_loss_ignore_index=2_55 , **kwargs , ):
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
    @property
    def default_onnx_opset( self ) -> int:
        return 12
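# Hedged usage sketch: the ONNX export settings above can be inspected from a
# default config without running an export.
def _illustrate_segformer_onnx_defaults():
    onnx_config = SegformerOnnxConfig(SegformerConfig() )
    print(dict(onnx_config.inputs ) )        # pixel_values dynamic axes
    print(onnx_config.atol_for_validation )  # 1e-04
    print(onnx_config.default_onnx_opset )   # 12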
| 29
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
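# Hedged invocation sketch (the script and flag names below are illustrative):
#     python xla_spawn.py --num_cores 8 train.py --per_device_batch_size 8
# The launched script must expose an `_mp_fn(index)` entry point, since
# `xmp.spawn` calls `mod._mp_fn` on every TPU core.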
| 92
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum ):
    """simple docstring"""
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer , last_epoch: int = -1 ):
    """simple docstring"""
    return LambdaLR(optimizer , lambda _: 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ):
    """simple docstring"""
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        steps_str , value_str = rule_str.split(""":""" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
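# Hedged worked example for the rule string above: step_rules="1:10,2:0.1,0.01"
# builds rules_dict={1: 10.0, 2: 0.1} with last_lr_multiple=0.01, so the
# multiplier is 10.0 at step 0, 0.1 at step 1, and 0.01 from step 2 onwards.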
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """simple docstring"""
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    """simple docstring"""
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(F'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType] , optimizer: Optimizer , step_rules: Optional[str] = None , num_warmup_steps: Optional[int] = None , num_training_steps: Optional[int] = None , num_cycles: int = 1 , power: float = 1.0 , last_epoch: int = -1 , ):
    """simple docstring"""
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F'{name} requires `num_warmup_steps`, please provide that argument.' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F'{name} requires `num_training_steps`, please provide that argument.' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
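# Hedged usage sketch (assumes a torch install): a linear warmup/decay schedule
# driven through the dispatcher above.
def _illustrate_get_scheduler():
    import torch

    model = torch.nn.Linear(2 , 2 )
    optimizer = torch.optim.AdamW(model.parameters() , lr=1E-3 )
    scheduler = get_scheduler("linear" , optimizer , num_warmup_steps=10 , num_training_steps=100 )
    for _ in range(5 ):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr() )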
| 267
| 0
|
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Any=1_3 , UpperCAmelCase__ : Optional[int]=7 , UpperCAmelCase__ : int=1_4 , UpperCAmelCase__ : List[Any]=1_0 , UpperCAmelCase__ : Optional[int]=1_9 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=1_6 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[Any]=[1, 2, 3, 4, 5] , UpperCAmelCase__ : Optional[Any]=2_5 , UpperCAmelCase__ : Tuple=5 , ) -> List[str]:
lowerCAmelCase = d_model
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = prediction_length
lowerCAmelCase = context_length
lowerCAmelCase = cardinality
lowerCAmelCase = num_time_features
lowerCAmelCase = lags_sequence
lowerCAmelCase = embedding_dimension
lowerCAmelCase = is_training
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = context_length
lowerCAmelCase = prediction_length + label_length
lowerCAmelCase = label_length
lowerCAmelCase = moving_average
lowerCAmelCase = autocorrelation_factor
def __UpperCAmelCase ( self : str ) -> int:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
lowerCAmelCase = config.context_length + max(config.lags_sequence )
lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
lowerCAmelCase = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase = self.get_config()
lowerCAmelCase = self.prepare_autoformer_inputs_dict(UpperCAmelCase__ )
return config, inputs_dict
def __UpperCAmelCase ( self : int ) -> List[str]:
lowerCAmelCase , lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ) -> int:
lowerCAmelCase = AutoformerModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval()
lowerCAmelCase = model(**UpperCAmelCase__ )
lowerCAmelCase = outputs.encoder_last_hidden_state
lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase__ )
lowerCAmelCase = AutoformerEncoder.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = model.create_network_inputs(**UpperCAmelCase__ )
lowerCAmelCase , lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCAmelCase = encoder(inputs_embeds=UpperCAmelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase__ )
lowerCAmelCase = AutoformerDecoder.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ )
lowerCAmelCase = decoder(
trend=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : Union[str, Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCamelCase : Any = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCamelCase : Union[str, Any] = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
lowerCamelCase : str = False
lowerCamelCase : List[Any] = False
lowerCamelCase : int = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : List[Any] = False
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
lowerCAmelCase = AutoformerModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> Any:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ )
lowerCAmelCase , lowerCAmelCase = model_class.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
self.assertEqual(info['missing_keys'] , [] )
def __UpperCAmelCase ( self : List[Any] ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase__ )
@unittest.skip(reason='Model has no tokens embeddings' )
def __UpperCAmelCase ( self : int ) -> str:
pass
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = inspect.signature(getattr(UpperCAmelCase__ , 'forward' ) )
# The main input is the name of the argument after `self`
lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(UpperCAmelCase__ )] , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = True
lowerCAmelCase = getattr(self.model_tester , 'seq_length' , UpperCAmelCase__ )
lowerCAmelCase = getattr(self.model_tester , 'decoder_seq_length' , UpperCAmelCase__ )
lowerCAmelCase = getattr(self.model_tester , 'encoder_seq_length' , UpperCAmelCase__ )
lowerCAmelCase = getattr(self.model_tester , 'd_model' , UpperCAmelCase__ )
lowerCAmelCase = getattr(self.model_tester , 'num_attention_heads' , UpperCAmelCase__ )
lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase = True
lowerCAmelCase = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# decoder attentions
lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(UpperCAmelCase__ , (list, tuple) )
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(UpperCAmelCase__ , (list, tuple) )
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCAmelCase__ ) )
lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __UpperCAmelCase ( self : Any ) -> Dict:
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename: str = "train-batch.pt" ):
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=filename , repo_type='dataset' )
    batch = torch.load(file , map_location=torch_device )
    return batch
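# Hedged note: the helper above pulls a pickled batch from the Hub dataset
# 'hf-internal-testing/tourism-monthly-batch'; both 'train-batch.pt' and
# 'val-batch.pt' are assumed to exist in that repo, as the tests below use them.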
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase ):
    def test_inference_no_head( self ) -> None:
        model = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ) -> None:
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(torch_device )
        batch = prepare_batch('val-batch.pt' )
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ) -> None:
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(torch_device )
        batch = prepare_batch('val-batch.pt' )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1E-1 ) )
| 361
|
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float] , nums2: list[float] ) -> float:
    all_numbers = sorted(nums1 + nums2 )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
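# Hedged worked example: median_of_two_arrays([1, 3], [2]) sorts to [1, 2, 3]
# (odd length, middle element 2), while [1, 2] and [3, 4] merge to [1, 2, 3, 4]
# and the two middle values average to 2.5.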
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 55
| 0
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 147
|
from __future__ import annotations
graph: dict[str, list[str]] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str ) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breadth_first_search(self ) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path(self, target_vertex: str ) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + f'->{target_vertex}'
if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
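# Hedged trace of the run above: BFS from 'G' builds the parent tree
# G -> C -> {A, F}, A -> {B, E}, B -> D, so shortest_path('D') prints
# 'G->C->A->B->D', shortest_path('G') prints 'G', and shortest_path('Foo')
# raises ValueError because 'Foo' was never reached.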
| 147
| 1
|
'''Search Open Library by ISBN and print a short book summary.'''
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    '''Given an "isbn/..." style Open Library id, return the book data as a dict.'''
    new_olid = olid.strip().strip('/')  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/') != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    '''Given Open Library book data, return a human-readable summary dict.'''
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'])['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ', '.join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
'''Image/Text processor class for CLIPSeg.'''
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError('You have to specify either text, visual prompt or images.')
        if text is not None and visual_prompt is not None:
            raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                'pixel_values': image_features.pixel_values,
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        '''Forward all arguments to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''Forward all arguments to the tokenizer's decode.'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
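# A minimal usage sketch (the "CIDAS/clipseg-rd64-refined" checkpoint on the Hub
# ships with this processor; the `image` variable is assumed to be a PIL image):
#
#   from transformers import CLIPSegProcessor
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat", "a remote"], images=image, return_tensors="pt")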
'''Testing suite for the PyTorch LiLT model.'''
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
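# These tests are collected by the usual transformers test runner, e.g. (the file
# path is an assumption about the repository layout):
#   python -m pytest tests/models/lilt/test_modeling_lilt.py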
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    '''Simulate a quantum half adder: the sum (XOR) and carry (AND) of two bits.'''
    simulator = qiskit.Aer.get_backend('aer_simulator')

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f'Half Adder Output Qubit Counts: {counts}')
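# For inputs (1, 1): sum = 1 XOR 1 = 0 and carry = 1 AND 1 = 1. With qiskit's
# little-endian classical bit ordering the counts should be close to {'10': 1000}.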
"""simple docstring"""
a_ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
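# A minimal training-loop sketch using the exports above (model, optimizer and
# dataloader are assumed to be standard PyTorch objects defined elsewhere):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       optimizer.zero_grad()
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()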
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = GPTSanJapaneseTokenizer
__UpperCamelCase = False
__UpperCamelCase = {'do_clean_text': False, 'add_prefix_space': False}
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
_A = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
_A = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
_A = {"unk_token": "<unk>"}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(a__ ) )
def a_ ( self : str , **a__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : Dict , a__ : str ) -> int:
'''simple docstring'''
_A = "こんにちは、世界。 \nこんばんは、㔺界。😀"
_A = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def a_ ( self : Union[str, Any] , a__ : Optional[int] ) -> Any:
'''simple docstring'''
_A , _A = self.get_input_output_texts(a__ )
_A = tokenizer.encode(a__ , add_special_tokens=a__ )
_A = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
return text, ids
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
pass # TODO add if relevant
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
pass # TODO add if relevant
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass # TODO add if relevant
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = self.get_tokenizer()
# Testing tokenization
_A = "こんにちは、世界。 こんばんは、㔺界。"
_A = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
_A = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_A = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_A = tokens + [tokenizer.unk_token]
_A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_A = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(a__ , a__ )
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
_A = self.get_tokenizer()
# Testing tokenization
_A = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
_A = "こんにちは、、、、世界。こんばんは、、、、世界。"
_A = tokenizer.encode(a__ )
_A = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
@slow
def a_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
_A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_A = "こんにちは、世界。"
_A = "こんばんは、㔺界。😀"
_A = "こんにちは、世界。こんばんは、世界。😀"
_A = tokenizer.encode(prefix_text + input_text )
_A = tokenizer.encode("" , prefix_text=prefix_text + input_text )
_A = tokenizer.encode(a__ , prefix_text=a__ )
_A = tokenizer.decode(a__ )
_A = tokenizer.decode(a__ )
_A = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , a__ )
@slow
def a_ ( self : str ) -> str:
'''simple docstring'''
_A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_A = "こんにちは、世界。"
_A = "こんばんは、㔺界。😀"
_A = len(tokenizer.encode(a__ ) ) - 2
_A = len(tokenizer.encode(a__ ) ) - 2
_A = [1] + [0] * (len_prefix + len_text + 1)
_A = [1] * (len_prefix + len_text + 1) + [0]
_A = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_A = tokenizer(prefix_text + input_text ).token_type_ids
_A = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
_A = tokenizer(a__ , prefix_text=a__ ).token_type_ids
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
@slow
def a_ ( self : int ) -> Dict:
'''simple docstring'''
_A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_A = tokenizer.encode("あンいワ" )
_A = tokenizer.encode("" , prefix_text="あンいワ" )
_A = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(a__ ) , tokenizer.decode(a__ ) )
self.assertEqual(tokenizer.decode(a__ ) , tokenizer.decode(a__ ) )
self.assertNotEqual(a__ , a__ )
self.assertNotEqual(a__ , a__ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_A = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
_A = tokenizer(a__ , padding=a__ )
_A = tokenizer.batch_encode_plus(a__ , padding=a__ )
# fmt: off
_A = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
_A = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_A = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , a__ )
self.assertListEqual(x_token.token_type_ids , a__ )
self.assertListEqual(x_token.attention_mask , a__ )
self.assertListEqual(x_token_a.input_ids , a__ )
self.assertListEqual(x_token_a.token_type_ids , a__ )
self.assertListEqual(x_token_a.attention_mask , a__ )
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
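# Likewise runnable through pytest, e.g. (the file path is an assumption):
#   python -m pytest tests/models/gptsan_japanese/test_tokenization_gptsan_japanese.py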
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)

TEST_UNET_CONFIG = {
    'sample_size': 32,
    'in_channels': 3,
    'out_channels': 3,
    'layers_per_block': 2,
    'num_class_embeds': 1000,
    'block_out_channels': [32, 64],
    'attention_head_dim': 8,
    'down_block_types': [
        'ResnetDownsampleBlock2D',
        'AttnDownBlock2D',
    ],
    'up_block_types': [
        'AttnUpBlock2D',
        'ResnetUpsampleBlock2D',
    ],
    'resnet_time_scale_shift': 'scale_shift',
    'upsample_type': 'resnet',
    'downsample_type': 'resnet',
}

IMAGENET_64_UNET_CONFIG = {
    'sample_size': 64,
    'in_channels': 3,
    'out_channels': 3,
    'layers_per_block': 3,
    'num_class_embeds': 1000,
    'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
    'attention_head_dim': 64,
    'down_block_types': [
        'ResnetDownsampleBlock2D',
        'AttnDownBlock2D',
        'AttnDownBlock2D',
        'AttnDownBlock2D',
    ],
    'up_block_types': [
        'AttnUpBlock2D',
        'AttnUpBlock2D',
        'AttnUpBlock2D',
        'ResnetUpsampleBlock2D',
    ],
    'resnet_time_scale_shift': 'scale_shift',
    'upsample_type': 'resnet',
    'downsample_type': 'resnet',
}

LSUN_256_UNET_CONFIG = {
    'sample_size': 256,
    'in_channels': 3,
    'out_channels': 3,
    'layers_per_block': 2,
    'num_class_embeds': None,
    'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    'attention_head_dim': 64,
    'down_block_types': [
        'ResnetDownsampleBlock2D',
        'ResnetDownsampleBlock2D',
        'ResnetDownsampleBlock2D',
        'AttnDownBlock2D',
        'AttnDownBlock2D',
        'AttnDownBlock2D',
    ],
    'up_block_types': [
        'AttnUpBlock2D',
        'AttnUpBlock2D',
        'AttnUpBlock2D',
        'ResnetUpsampleBlock2D',
        'ResnetUpsampleBlock2D',
        'ResnetUpsampleBlock2D',
    ],
    'resnet_time_scale_shift': 'default',
    'upsample_type': 'resnet',
    'downsample_type': 'resnet',
}

CD_SCHEDULER_CONFIG = {
    'num_train_timesteps': 40,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    'num_train_timesteps': 201,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    'num_train_timesteps': 151,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}


def str2bool(v):
    '''argparse-friendly string-to-bool conversion.'''
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected')


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Target key names below follow the diffusers ResnetBlock2D layout.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config['up_block_types']

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
    )
    parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f'Checkpoint: {ckpt_name}')

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')

    if not args.class_cond:
        unet_config['num_class_embeds'] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
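# Example invocation (script filename and paths are assumptions;
# cd_imagenet64_l2.pt is one of the checkpoints published with the
# consistency-models release):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64_l2 --class_cond True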
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    '''Answer open-ended questions about images.'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'image': image, 'question': question}
        else:
            # Supports the following format:
            # - {"image": image, "question": question} or a list thereof
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs['image'])
        model_inputs = self.tokenizer(
            inputs['question'], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def lowerCAmelCase_( lowercase_ : Any ) -> Dict:
_lowerCamelCase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowerCamelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowerCamelCase = 4
_lowerCamelCase = 48
_lowerCamelCase = '''pixelshuffle_aux'''
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowerCamelCase = [6, 6, 6, 6]
_lowerCamelCase = 60
_lowerCamelCase = [6, 6, 6, 6]
_lowerCamelCase = '''pixelshuffledirect'''
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowerCamelCase = 4
_lowerCamelCase = '''nearest+conv'''
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowerCamelCase = 1
_lowerCamelCase = 1
_lowerCamelCase = 1_26
_lowerCamelCase = 7
_lowerCamelCase = 2_55.0
_lowerCamelCase = ''''''
return config
def lowerCAmelCase_( lowercase_ : Union[str, Any] , lowercase_ : int ) -> List[str]:
if "patch_embed.proj" in name and "layers" not in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' )
if "layers" in name:
_lowerCamelCase = name.replace('''layers''' , '''encoder.stages''' )
if "residual_group.blocks" in name:
_lowerCamelCase = name.replace('''residual_group.blocks''' , '''layers''' )
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
_lowerCamelCase = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
_lowerCamelCase = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
_lowerCamelCase = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
_lowerCamelCase = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' )
if name == "norm.weight":
_lowerCamelCase = '''layernorm.weight'''
if name == "norm.bias":
_lowerCamelCase = '''layernorm.bias'''
if "conv_first" in name:
_lowerCamelCase = name.replace('''conv_first''' , '''first_convolution''' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowerCamelCase = name.replace('''conv_last''' , '''final_convolution''' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowerCamelCase = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' )
if "upsample.0" in name:
_lowerCamelCase = name.replace('''upsample.0''' , '''upsample.convolution_0''' )
if "upsample.2" in name:
_lowerCamelCase = name.replace('''upsample.2''' , '''upsample.convolution_1''' )
_lowerCamelCase = '''upsample.''' + name
elif config.upsampler == "pixelshuffledirect":
_lowerCamelCase = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' )
_lowerCamelCase = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' )
else:
pass
else:
_lowerCamelCase = '''swin2sr.''' + name
return name
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[str] ) -> Dict:
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[1] )
_lowerCamelCase = int(key_split[4] )
_lowerCamelCase = config.embed_dim
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
pass
else:
_lowerCamelCase = val
return orig_state_dict
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any ) -> Tuple:
_lowerCamelCase = get_config(lowercase_ )
_lowerCamelCase = SwinaSRForImageSuperResolution(lowercase_ )
model.eval()
_lowerCamelCase = torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' )
_lowerCamelCase = convert_state_dict(lowercase_ , lowercase_ )
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError('''Missing keys when converting: {}'''.format(lowercase_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"""Unexpected key {key} in state_dict""" )
# verify values
_lowerCamelCase = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'''
_lowerCamelCase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert('''RGB''' )
_lowerCamelCase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_lowerCamelCase = 1_26 if '''Jpeg''' in checkpoint_url else 2_56
_lowerCamelCase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowerCamelCase = transforms(lowercase_ ).unsqueeze(0 )
if config.num_channels == 1:
_lowerCamelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
_lowerCamelCase = model(lowercase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowerCamelCase = torch.Size([1, 3, 5_12, 5_12] )
_lowerCamelCase = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowerCamelCase = torch.Size([1, 3, 10_24, 10_24] )
_lowerCamelCase = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowerCamelCase = torch.Size([1, 3, 10_24, 10_24] )
_lowerCamelCase = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowerCamelCase = torch.Size([1, 3, 5_12, 5_12] )
_lowerCamelCase = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowerCamelCase = torch.Size([1, 3, 10_24, 10_24] )
_lowerCamelCase = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1e-3 )
print('''Looks ok!''' )
_lowerCamelCase = {
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': (
'''swin2SR-classical-sr-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': (
'''swin2SR-classical-sr-x4-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': (
'''swin2SR-compressed-sr-x4-48'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': (
'''swin2SR-lightweight-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': (
'''swin2SR-realworld-sr-x4-64-bsrgan-psnr'''
),
}
_lowerCamelCase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub(F"""caidas/{model_name}""" )
processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
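# Example invocation (script filename and dump path are assumptions):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64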
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = 'poolformer'
def __init__( self , lowerCamelCase__=3 , lowerCamelCase__=1_6 , lowerCamelCase__=1_6 , lowerCamelCase__=3 , lowerCamelCase__=4.0 , lowerCamelCase__=[2, 2, 6, 2] , lowerCamelCase__=[6_4, 1_2_8, 3_2_0, 5_1_2] , lowerCamelCase__=[7, 3, 3, 3] , lowerCamelCase__=[4, 2, 2, 2] , lowerCamelCase__=[2, 1, 1, 1] , lowerCamelCase__=4 , lowerCamelCase__=0.0 , lowerCamelCase__="gelu" , lowerCamelCase__=True , lowerCamelCase__=1e-5 , lowerCamelCase__=0.0_2 , **lowerCamelCase__ , ):
_lowerCamelCase = num_channels
_lowerCamelCase = patch_size
_lowerCamelCase = stride
_lowerCamelCase = padding
_lowerCamelCase = pool_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = mlp_ratio
_lowerCamelCase = depths
_lowerCamelCase = patch_sizes
_lowerCamelCase = strides
_lowerCamelCase = num_encoder_blocks
_lowerCamelCase = drop_path_rate
_lowerCamelCase = hidden_act
_lowerCamelCase = use_layer_scale
_lowerCamelCase = layer_scale_init_value
_lowerCamelCase = initializer_range
super().__init__(**lowerCamelCase__ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str = version.parse('1.11' )
@property
def snake_case__ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case__ ( self ):
return 2e-3
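# A minimal sketch of typical usage (class names follow the transformers naming
# convention for this config; treat the exact defaults as assumptions):
#
#   from transformers import PoolFormerConfig, PoolFormerModel
#   config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512])
#   model = PoolFormerModel(config)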
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__lowercase = logging.getLogger(__name__)
@dataclass
class _A :
"""simple docstring"""
UpperCAmelCase : Dict = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCAmelCase : Union[str, Any] = field(
default=_a ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCAmelCase : Dict = field(
default=_a ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCAmelCase : Union[str, Any] = field(
default=_a ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
UpperCAmelCase : Tuple = field(default=_a ,metadata={"""help""": """Whether tp freeze the encoder."""} )
UpperCAmelCase : str = field(default=_a ,metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class _A :
"""simple docstring"""
UpperCAmelCase : Union[str, Any] = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
UpperCAmelCase : Optional[Any] = field(
default="""summarization""" ,metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} ,)
UpperCAmelCase : Any = field(
default=1_0_2_4 ,metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} ,)
UpperCAmelCase : Any = field(
default=1_2_8 ,metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} ,)
UpperCAmelCase : List[str] = field(
default=1_4_2 ,metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} ,)
UpperCAmelCase : Tuple = field(
default=1_4_2 ,metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} ,)
UpperCAmelCase : int = field(default=-1 ,metadata={"""help""": """# training examples. -1 means use all."""} )
UpperCAmelCase : int = field(default=-1 ,metadata={"""help""": """# validation examples. -1 means use all."""} )
UpperCAmelCase : int = field(default=-1 ,metadata={"""help""": """# test examples. -1 means use all."""} )
UpperCAmelCase : str = field(default=_a ,metadata={"""help""": """Source language id for translation."""} )
UpperCAmelCase : List[str] = field(default=_a ,metadata={"""help""": """Target language id for translation."""} )
UpperCAmelCase : Optional[int] = field(default=_a ,metadata={"""help""": """# num_beams to use for evaluation."""} )
UpperCAmelCase : Optional[int] = field(
default=_a ,metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} ,)
def lowercase ( A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , F'''{split}_results.json''' ) )
def lowercase ( )-> Union[str, Any]:
'''simple docstring'''
a : int = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a : int = parser.parse_args_into_dataclasses()
check_output_dir(UpperCAmelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a : str = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
assert hasattr(UpperCAmelCase_ , UpperCAmelCase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(UpperCAmelCase_ , UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
a : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a : str = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(UpperCAmelCase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
a : List[str] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(UpperCAmelCase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
a : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
a : str = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(UpperCAmelCase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
a : Any = SeqaSeqDataset
# Get datasets
a : Any = (
dataset_class(
UpperCAmelCase_ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
a : Optional[int] = (
dataset_class(
UpperCAmelCase_ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
a : Any = (
dataset_class(
UpperCAmelCase_ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
a : Any = (
build_compute_metrics_fn(data_args.task , UpperCAmelCase_ ) if training_args.predict_with_generate else None
)
a : Union[str, Any] = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , data_args=UpperCAmelCase_ , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , data_collator=SeqaSeqDataCollator(
UpperCAmelCase_ , UpperCAmelCase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
a : Optional[int] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
a : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
a : List[str] = train_result.metrics
a : Union[str, Any] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , UpperCAmelCase_ , training_args.output_dir )
all_metrics.update(UpperCAmelCase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a : Any = trainer.evaluate(metric_key_prefix="val" )
a : Optional[int] = data_args.n_val
a : Any = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , UpperCAmelCase_ , training_args.output_dir )
all_metrics.update(UpperCAmelCase_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
a : Tuple = trainer.predict(test_dataset=UpperCAmelCase_ , metric_key_prefix="test" )
a : List[Any] = test_output.metrics
a : List[Any] = data_args.n_test
if trainer.is_world_process_zero():
a : Tuple = round(metrics["test_loss"] , 4 )
handle_metrics("test" , UpperCAmelCase_ , training_args.output_dir )
all_metrics.update(UpperCAmelCase_ )
if training_args.predict_with_generate:
a : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
a : Any = lmap(str.strip , UpperCAmelCase_ )
write_txt_file(UpperCAmelCase_ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(UpperCAmelCase_ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def lowercase ( A_ )-> List[Any]:
'''simple docstring'''
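    # Entry point for xla_spawn (TPUs); the index argument is required by the
    # spawn API but is otherwise unused here.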
main()
if __name__ == "__main__":
main()
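# Example invocation (illustrative; the script name and data layout are
# assumptions -- the flags follow the argument dataclasses parsed above):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#       --output_dir ./outputs --do_train --do_eval --task summarization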
| 40
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
def __init__( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCamelCase , scheduler=UpperCamelCase )
@torch.no_grad()
def __call__( self , UpperCamelCase = 1 , UpperCamelCase = 2000 , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = self.unet.config.sample_size
lowerCamelCase_ = (batch_size, 3, img_size, img_size)
lowerCamelCase_ = self.unet
lowerCamelCase_ = randn_tensor(UpperCamelCase , generator=UpperCamelCase ) * self.scheduler.init_noise_sigma
lowerCamelCase_ = sample.to(self.device )
self.scheduler.set_timesteps(UpperCamelCase )
self.scheduler.set_sigmas(UpperCamelCase )
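        # Predictor-corrector sampling: at every timestep a few Langevin
        # correction steps refine the current sample before the reverse-SDE
        # predictor step advances it.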
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCamelCase_ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCamelCase_ = self.unet(UpperCamelCase , UpperCamelCase ).sample
lowerCamelCase_ = self.scheduler.step_correct(UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
# prediction step
lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase ).sample
lowerCamelCase_ = self.scheduler.step_pred(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase )
lowerCamelCase_ ,lowerCamelCase_ = output.prev_sample, output.prev_sample_mean
lowerCamelCase_ = sample_mean.clamp(0 , 1 )
lowerCamelCase_ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(UpperCamelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=UpperCamelCase )
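# Usage sketch (illustrative; the checkpoint id is an assumption and the class
# name is the obfuscated alias of the score-SDE VE pipeline defined above):
#   pipe = snake_case.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]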
| 55
| 0
|
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowerCamelCase = HfArgumentParser(InitializationArguments)
lowerCamelCase = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowerCamelCase = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
lowerCamelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowerCamelCase = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
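# Example invocation (illustrative; the script name and argument values are
# assumptions based on the InitializationArguments parsed above):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-init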
| 365
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[int]=1):
'''simple docstring'''
__lowercase =tokenizer
__lowercase =dataset
__lowercase =len(_lowerCAmelCase) if n_tasks is None else n_tasks
__lowercase =n_copies
def __iter__( self : Union[str, Any]):
'''simple docstring'''
__lowercase =[]
for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
__lowercase =self.tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='pt')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any]):
'''simple docstring'''
__lowercase =start_length
__lowercase =eof_strings
__lowercase =tokenizer
def __call__( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any):
'''simple docstring'''
__lowercase =self.tokenizer.batch_decode(input_ids[:, self.start_length :])
__lowercase =[]
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(_lowerCAmelCase)
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =re.split('(%s)' % '|'.join(_lowerCAmelCase ) , _lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=20 , **_lowerCAmelCase ):
"""simple docstring"""
__lowercase =defaultdict(_lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCAmelCase ) ):
with torch.no_grad():
__lowercase =batch['ids'].shape[-1]
__lowercase =accelerator.unwrap_model(_lowerCAmelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCAmelCase , **_lowerCAmelCase )
# each task is generated batch_size times
__lowercase =batch['task_id'].repeat(_lowerCAmelCase )
__lowercase =accelerator.pad_across_processes(
_lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
__lowercase , __lowercase =accelerator.gather((generated_tokens, generated_tasks) )
__lowercase =generated_tokens.cpu().numpy()
__lowercase =generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCAmelCase , _lowerCAmelCase ):
gen_token_dict[task].append(_lowerCAmelCase )
__lowercase =[[] for _ in range(_lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
__lowercase =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
code_gens[task].append(remove_last_block(_lowerCAmelCase ) )
return code_gens
def _A ( ):
"""simple docstring"""
__lowercase =HfArgumentParser(_lowerCAmelCase )
__lowercase =parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__lowercase =args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__lowercase ='false'
if args.num_workers is None:
__lowercase =multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__lowercase =Accelerator()
set_seed(args.seed , device_specific=_lowerCAmelCase )
# Load model and tokenizer
__lowercase =AutoTokenizer.from_pretrained(args.model_ckpt )
__lowercase =tokenizer.eos_token
__lowercase =AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__lowercase ={
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCAmelCase , _lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
__lowercase =load_dataset('openai_humaneval' )
__lowercase =load_metric('code_eval' )
__lowercase =args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
__lowercase =args.n_samples // args.batch_size
__lowercase =TokenizedDataset(_lowerCAmelCase , human_eval['test'] , n_copies=_lowerCAmelCase , n_tasks=_lowerCAmelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
__lowercase =DataLoader(_lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__lowercase =code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
__lowercase , __lowercase =accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase )
__lowercase =complete_code(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , n_tasks=_lowerCAmelCase , batch_size=args.batch_size , **_lowerCAmelCase , )
if accelerator.is_main_process:
__lowercase =[]
for task in tqdm(range(_lowerCAmelCase ) ):
__lowercase =human_eval['test'][task]['test']
__lowercase =f"""check({human_eval['test'][task]['entry_point']})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
__lowercase , __lowercase =code_eval_metric.compute(
references=_lowerCAmelCase , predictions=_lowerCAmelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
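# Example launch (illustrative; the file name and checkpoint are assumptions,
# the flags follow HumanEvalArguments):
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot --num_tasks 10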
| 48
| 0
|
"""simple docstring"""
import math
import os
import sys
def _A (__a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ''''''
try:
with open(__a , '''rb''' ) as binary_file:
SCREAMING_SNAKE_CASE_ : Optional[int] = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE_ : int = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def _A (__a , __a , __a , __a ) -> None:
"""simple docstring"""
lexicon.pop(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = last_match_id
if math.loga(__a ).is_integer():
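        # Once the number of codes reaches a power of two, every code needs one
        # more bit, so prefix each existing code with "0".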
for curr_key in lexicon:
SCREAMING_SNAKE_CASE_ : Dict = '''0''' + lexicon[curr_key]
SCREAMING_SNAKE_CASE_ : str = bin(__a )[2:]
def _A (__a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {'''0''': '''0''', '''1''': '''1'''}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = '''''', ''''''
SCREAMING_SNAKE_CASE_ : Tuple = len(__a )
for i in range(len(__a ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE_ : int = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__a , __a , __a , __a )
index += 1
SCREAMING_SNAKE_CASE_ : int = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = lexicon[curr_string]
result += last_match_id
return result
def _A (__a , __a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = os.path.getsize(__a )
SCREAMING_SNAKE_CASE_ : List[str] = bin(__a )[2:]
SCREAMING_SNAKE_CASE_ : List[Any] = len(__a )
return "0" * (length_length - 1) + file_length_binary + compressed
def _A (__a , __a ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 8
try:
with open(__a , '''wb''' ) as opened_file:
SCREAMING_SNAKE_CASE_ : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__a ) , __a )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__a , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def _A (__a , __a ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = read_file_binary(__a )
SCREAMING_SNAKE_CASE_ : List[str] = compress_data(__a )
SCREAMING_SNAKE_CASE_ : Dict = add_file_length(__a , __a )
write_file_binary(__a , __a )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
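# Example (illustrative; assumes this script is saved as compress.py):
#   python compress.py plain_input.txt compressed_output.bin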
| 91
|
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
UpperCAmelCase_ : Tuple = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank, which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities  These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
UpperCAmelCase_ : Union[str, Any] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one
        are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def _A (__a , __a , __a=False , __a=False , __a=True , __a=False , __a="dummy_doc" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = {doc: key_lines}
SCREAMING_SNAKE_CASE_ : List[str] = {doc: sys_lines}
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : List[str] = 0
SCREAMING_SNAKE_CASE_ : Tuple = 0
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[str] = 0
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = reader.get_doc_mentions(__a , key_doc_lines[doc] , __a )
key_singletons_num += singletons_num
if NP_only or min_span:
SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = reader.get_doc_mentions(__a , sys_doc_lines[doc] , __a )
sys_singletons_num += singletons_num
if NP_only or min_span:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a )
if remove_nested:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'''Number of resulting singleton clusters in the key '''
f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'''files, respectively''' )
return doc_coref_infos
def _A (__a , __a , __a , __a , __a , __a , __a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = get_coref_infos(__a , __a , __a , __a , __a , __a )
SCREAMING_SNAKE_CASE_ : str = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : str = 0
for name, metric in metrics:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = evaluator.evaluate_documents(__a , __a , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} )
logger.info(
name.ljust(10 ) , f'Recall: {recall * 1_00:.2f}' , f' Precision: {precision * 1_00:.2f}' , f' F1: {fa * 1_00:.2f}' , )
if conll_subparts_num == 3:
SCREAMING_SNAKE_CASE_ : Tuple = (conll / 3) * 1_00
logger.info(f'CoNLL score: {conll:.2f}' )
output_scores.update({'''conll_score''': conll} )
return output_scores
def _A (__a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
SCREAMING_SNAKE_CASE_ : Any = line.split()[5]
                if parse_col != "-":
SCREAMING_SNAKE_CASE_ : Any = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''')),
'''references''': datasets.Sequence(datasets.Value('''string''')),
}) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict=True , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : Dict=False):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = util.check_gold_parse_annotation(lowercase_)
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate(
key_lines=lowercase_ , sys_lines=lowercase_ , metrics=lowercase_ , NP_only=lowercase_ , remove_nested=lowercase_ , keep_singletons=lowercase_ , min_span=lowercase_ , )
return score
| 91
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : str = AltDiffusionPipeline
__snake_case : int = TEXT_TO_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5002 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
SCREAMING_SNAKE_CASE = 77
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int=0 ) -> Any:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = text_encoder
SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """A photo of an astronaut"""
SCREAMING_SNAKE_CASE = alt_pipe(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = text_encoder
SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,safety_checker=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe([prompt] ,generator=lowerCamelCase__ ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type="""np""" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" ,subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe([prompt] ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""numpy""" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 363
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : str = AltDiffusionPipeline
__snake_case : int = TEXT_TO_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5002 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
SCREAMING_SNAKE_CASE = 77
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int=0 ) -> Any:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = text_encoder
SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """A photo of an astronaut"""
SCREAMING_SNAKE_CASE = alt_pipe(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = text_encoder
SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,safety_checker=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe([prompt] ,generator=lowerCamelCase__ ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type="""np""" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" ,subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe([prompt] ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""numpy""" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 193
| 0
|
'''simple docstring'''
import numpy as np
from PIL import Image
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = np.array(UpperCamelCase__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : Dict = 0
# compute the shape of the output matrix
UpperCAmelCase__ : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCAmelCase__ : Optional[Any] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
UpperCAmelCase__ : List[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : int = 0
return updated_arr
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : List[Any] = np.array(UpperCamelCase__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = 0
# compute the shape of the output matrix
UpperCAmelCase__ : Optional[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCAmelCase__ : int = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
UpperCAmelCase__ : str = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : str = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__A =Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
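# Worked example (illustrative):
#   arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#   maxpooling(arr, size=2, stride=2) -> [[ 6.,  8.], [14., 16.]]
#   avgpooling(arr, size=2, stride=2) -> [[ 3.,  5.], [11., 13.]]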
| 163
|
'''simple docstring'''
from math import sqrt
def _UpperCamelCase ( UpperCamelCase__ = 1_0_0_0_0_0_0 ):
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(UpperCamelCase__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
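# Note (added): for a cuboid with sides a <= b <= c, the shortest surface path
# between opposite corners has length sqrt((a + b)**2 + c**2); for each candidate
# c (max_cuboid_size) the loop counts the pairs (a, b) with a + b equal to
# sum_shortest_sides that make this length an integer.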
| 163
| 1
|
'''simple docstring'''
a_ = range(2, 2_0 + 1)
a_ = [1_0**k for k in range(ks[-1] + 1)]
a_ = {}
def _a( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : List[Any], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =sum(a_i[j] for j in range(snake_case__, len(snake_case__ ) ) )
SCREAMING_SNAKE_CASE__ : Dict =sum(a_i[j] * base[j] for j in range(min(len(snake_case__ ), snake_case__ ) ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =0, 0
SCREAMING_SNAKE_CASE__ : List[Any] =n - i
SCREAMING_SNAKE_CASE__ : Tuple =memo.get(snake_case__ )
if sub_memo is not None:
SCREAMING_SNAKE_CASE__ : str =sub_memo.get(snake_case__ )
if jumps is not None and len(snake_case__ ) > 0:
# find and make the largest jump without going over
SCREAMING_SNAKE_CASE__ : Optional[int] =-1
for _k in range(len(snake_case__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =_k
break
if max_jump >= 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =jumps[max_jump]
# since the difference between jumps is cached, add c
SCREAMING_SNAKE_CASE__ : Optional[Any] =diff + c
for j in range(min(snake_case__, len(snake_case__ ) ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =divmod(snake_case__, 1_0 )
if new_c > 0:
add(snake_case__, snake_case__, snake_case__ )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[]
else:
SCREAMING_SNAKE_CASE__ : Any ={c: []}
SCREAMING_SNAKE_CASE__ : Dict =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =next_term(snake_case__, k - 1, i + dn, snake_case__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =compute(snake_case__, snake_case__, i + dn, snake_case__ )
diff += _diff
dn += terms_jumped
SCREAMING_SNAKE_CASE__ : Any =sub_memo[c]
# keep jumps sorted by # of terms skipped
SCREAMING_SNAKE_CASE__ : List[str] =0
while j < len(snake_case__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(snake_case__, (diff, dn, k) )
return (diff, dn)
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any], UpperCamelCase__ : Dict, UpperCamelCase__ : int ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(snake_case__ ):
a_i.extend([0 for _ in range(k - len(snake_case__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
SCREAMING_SNAKE_CASE__ : Dict =i
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =0, 0, 0
for j in range(len(snake_case__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
SCREAMING_SNAKE_CASE__ : List[Any] =ds_c + ds_b
diff += addend
SCREAMING_SNAKE_CASE__ : Dict =0
for j in range(snake_case__ ):
SCREAMING_SNAKE_CASE__ : List[str] =a_i[j] + addend
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =divmod(snake_case__, 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(snake_case__, snake_case__, snake_case__ )
return diff, i - start_i
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Any, UpperCamelCase__ : Any ):
'''simple docstring'''
for j in range(snake_case__, len(snake_case__ ) ):
SCREAMING_SNAKE_CASE__ : Tuple =digits[j] + addend
if s >= 1_0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =divmod(snake_case__, 1_0 )
SCREAMING_SNAKE_CASE__ : int =addend // 1_0 + quotient
else:
SCREAMING_SNAKE_CASE__ : Tuple =s
SCREAMING_SNAKE_CASE__ : str =addend // 1_0
if addend == 0:
break
while addend > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =divmod(snake_case__, 1_0 )
digits.append(snake_case__ )
def _a( UpperCamelCase__ : int = 1_0**1_5 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =[1]
SCREAMING_SNAKE_CASE__ : List[Any] =1
SCREAMING_SNAKE_CASE__ : Dict =0
while True:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =next_term(snake_case__, 2_0, i + dn, snake_case__ )
dn += terms_jumped
if dn == n - i:
break
SCREAMING_SNAKE_CASE__ : Dict =0
for j in range(len(snake_case__ ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
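# Note (added): this computes a(10**15) for the sequence a(1) = 1,
# a(n) = a(n - 1) + digit_sum(a(n - 1)), caching "jumps" keyed by the digit sum
# of the high-order digits so that long runs of terms can be skipped at once.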
| 365
|
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
a_ = TypeVar('KT')
a_ = TypeVar('VT')
class __SCREAMING_SNAKE_CASE ( Generic[KT, VT] ):
def __init__( self : Dict , __lowercase : KT | str = "root" , __lowercase : VT | None = None ) -> Dict:
SCREAMING_SNAKE_CASE__ : Any =key
SCREAMING_SNAKE_CASE__ : Optional[int] =value
SCREAMING_SNAKE_CASE__ : list[Node[KT, VT]] =[]
def __repr__( self : Any ) -> str:
return F"Node({self.key}: {self.value})"
@property
def __magic_name__ ( self : List[Any] ) -> int:
return len(self.forward )
class __SCREAMING_SNAKE_CASE ( Generic[KT, VT] ):
def __init__( self : int , __lowercase : float = 0.5 , __lowercase : int = 16 ) -> int:
SCREAMING_SNAKE_CASE__ : Node[KT, VT] =Node[KT, VT]()
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : Dict =p
SCREAMING_SNAKE_CASE__ : Any =max_level
def __str__( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =list(self )
if len(__lowercase ) == 0:
return F"SkipList(level={self.level})"
SCREAMING_SNAKE_CASE__ : List[str] =max((len(str(__lowercase ) ) for item in items) , default=4 )
SCREAMING_SNAKE_CASE__ : Any =max(__lowercase , 4 ) + 4
SCREAMING_SNAKE_CASE__ : Any =self.head
SCREAMING_SNAKE_CASE__ : List[Any] =[]
SCREAMING_SNAKE_CASE__ : Tuple =node.forward.copy()
lines.append(F"[{node.key}]".ljust(__lowercase , '''-''' ) + '''* ''' * len(__lowercase ) )
lines.append(''' ''' * label_size + '''| ''' * len(__lowercase ) )
while len(node.forward ) != 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =node.forward[0]
lines.append(
F"[{node.key}]".ljust(__lowercase , '''-''' )
+ ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
lines.append(''' ''' * label_size + '''| ''' * len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Any =node.forward
lines.append('''None'''.ljust(__lowercase ) + '''* ''' * len(__lowercase ) )
return F"SkipList(level={self.level})\n" + "\n".join(__lowercase )
def __iter__( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[str] =self.head
while len(node.forward ) != 0:
yield node.forward[0].key
SCREAMING_SNAKE_CASE__ : List[Any] =node.forward[0]
def __magic_name__ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =1
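        # Flip a biased coin: each additional level is kept with probability p,
        # giving a geometrically distributed node height capped at max_level.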
while random() < self.p and level < self.max_level:
level += 1
return level
def __magic_name__ ( self : str , __lowercase : List[str] ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
SCREAMING_SNAKE_CASE__ : Optional[int] =[]
SCREAMING_SNAKE_CASE__ : int =self.head
for i in reversed(range(self.level ) ):
            # i < node.level - When the node level is less than `i`, decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
SCREAMING_SNAKE_CASE__ : str =node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __magic_name__ ( self : Optional[int] , __lowercase : KT ) -> List[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =self._locate_node(__lowercase )
if node is not None:
for i, update_node in enumerate(__lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
SCREAMING_SNAKE_CASE__ : List[str] =node.forward[i]
else:
SCREAMING_SNAKE_CASE__ : str =update_node.forward[:i]
def __magic_name__ ( self : str , __lowercase : KT , __lowercase : VT ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self._locate_node(__lowercase )
if node is not None:
SCREAMING_SNAKE_CASE__ : Dict =value
else:
SCREAMING_SNAKE_CASE__ : List[Any] =self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __lowercase ):
update_vector.append(self.head )
SCREAMING_SNAKE_CASE__ : List[str] =level
SCREAMING_SNAKE_CASE__ : List[str] =Node(__lowercase , __lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : int =new_node
def __magic_name__ ( self : Tuple , __lowercase : VT ) -> VT | None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =self._locate_node(__lowercase )
if node is not None:
return node.value
return None
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =SkipList()
skip_list.insert('''Key1''', 3 )
skip_list.insert('''Key2''', 1_2 )
skip_list.insert('''Key3''', 4_1 )
skip_list.insert('''Key4''', -1_9 )
SCREAMING_SNAKE_CASE__ : Any =skip_list.head
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={}
while node.level != 0:
SCREAMING_SNAKE_CASE__ : Dict =node.forward[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] =node.value
assert len(UpperCamelCase__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =SkipList()
skip_list.insert('''Key1''', 1_0 )
skip_list.insert('''Key1''', 1_2 )
skip_list.insert('''Key5''', 7 )
skip_list.insert('''Key7''', 1_0 )
skip_list.insert('''Key10''', 5 )
skip_list.insert('''Key7''', 7 )
skip_list.insert('''Key5''', 5 )
skip_list.insert('''Key10''', 1_0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =skip_list.head
SCREAMING_SNAKE_CASE__ : List[Any] ={}
while node.level != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =node.forward[0]
SCREAMING_SNAKE_CASE__ : List[Any] =node.value
assert len(UpperCamelCase__ ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =SkipList()
assert skip_list.find('''Some key''' ) is None
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =SkipList()
skip_list.insert('''Key2''', 2_0 )
assert skip_list.find('''Key2''' ) == 2_0
skip_list.insert('''Some Key''', 1_0 )
skip_list.insert('''Key2''', 8 )
skip_list.insert('''V''', 1_3 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 1_0
assert skip_list.find('''V''' ) == 1_3
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =SkipList()
skip_list.insert('''Key1''', 1_2 )
skip_list.insert('''V''', 1_3 )
skip_list.insert('''X''', 1_4 )
skip_list.insert('''Key2''', 1_5 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =SkipList()
skip_list.insert('''Key1''', 1_2 )
skip_list.insert('''V''', 1_3 )
skip_list.insert('''X''', 1_4 )
skip_list.insert('''Key2''', 1_5 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 1_4
assert skip_list.find('''Key1''' ) == 1_2
assert skip_list.find('''Key2''' ) == 1_5
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 1_2
assert skip_list.find('''Key2''' ) == 1_5
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 1_5
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =SkipList()
skip_list.insert('''Key1''', 1_2 )
skip_list.insert('''V''', 1_3 )
skip_list.insert('''X''', 1_4_2 )
skip_list.insert('''Key2''', 1_5 )
skip_list.delete('''X''' )
def traverse_keys(UpperCamelCase__ : Dict ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(UpperCamelCase__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def _a( ):
'''simple docstring'''
def is_sorted(UpperCamelCase__ : List[Any] ):
return all(next_item >= item for item, next_item in zip(UpperCamelCase__, lst[1:] ) )
SCREAMING_SNAKE_CASE__ : Tuple =SkipList()
for i in range(1_0 ):
skip_list.insert(UpperCamelCase__, UpperCamelCase__ )
assert is_sorted(list(UpperCamelCase__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(UpperCamelCase__ ) )
skip_list.insert(-1_2, -1_2 )
skip_list.insert(7_7, 7_7 )
assert is_sorted(list(UpperCamelCase__ ) )
def _a( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =SkipList()
skip_list.insert(2, '''2''' )
skip_list.insert(4, '''4''' )
skip_list.insert(6, '''4''' )
skip_list.insert(4, '''5''' )
skip_list.insert(8, '''4''' )
skip_list.insert(9, '''4''' )
skip_list.delete(4 )
print(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
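# Hedged usage sketch (assumption: the obfuscated assignment targets inside the
# class stand for the original local names, so SkipList behaves as a sorted map):
def _example_skip_list_usage():
    sl = SkipList()
    for key, value in (("a", 1), ("b", 2), ("c", 3)):
        sl.insert(key, value)
    assert sl.find("b") == 2
    sl.delete("b")
    assert sl.find("b") is None
    assert list(sl) == ["a", "c"]  # iteration yields keys in ascending order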
| 222
| 0
|
'''simple docstring'''
def __lowercase ( __lowercase = 10**9 ) -> int:
'''simple docstring'''
_A = 1
_A = 2
_A = 0
_A = 0
_A = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
_A = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
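# Hedged brute-force cross-check (not part of the original solution): for a small
# perimeter cap, almost-equilateral triangles (sides a, a, a±1) with integral
# area can be enumerated directly via Heron's formula and their perimeters summed.
import math

def _brute_force_perimeter_sum(max_perimeter: int = 10**4) -> int:
    total = 0
    a = 2
    while 3 * a - 1 <= max_perimeter:
        for b in (a - 1, a + 1):
            perimeter = 2 * a + b
            if b <= 0 or perimeter > max_perimeter:
                continue
            # Heron's formula for sides (a, a, b): 16 * area^2 = (2a + b)(2a - b) * b^2
            sixteen_area_sq = (2 * a + b) * (2 * a - b) * b * b
            root = math.isqrt(sixteen_area_sq)
            if root > 0 and root * root == sixteen_area_sq and root % 4 == 0:
                total += perimeter
        a += 1
    return total  # should agree with solution(max_perimeter) on the same cap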
| 79
|
import os
import sys
a =os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a =[
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
return AutoConfig.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
return AutoTokenizer.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModel.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
return AutoModelForCausalLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForMaskedLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
return AutoModelForQuestionAnswering.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
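# Hedged usage sketch (assumption: the wrapper names above are all mangled to the
# same identifier and shadow one another, so this calls the underlying Auto
# classes directly; weights are downloaded from the Hub on first use):
def _example_bootstrap(model_name: str = "distilbert-base-uncased"):
    config = AutoConfig.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForMaskedLM.from_pretrained(model_name)
    return config, tokenizer, model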
| 73
| 0
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase_ ( _a : str ):
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_a ):
return ext
raise Exception(
F'''Unable to determine file format from file extension {path}. '''
F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def lowerCamelCase_ ( _a : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
UpperCAmelCase_ : Dict = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
UpperCAmelCase_ : str = PipelineDataFormat.from_str(
format=_a , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_a , _a )
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Pipeline ,lowerCamelCase_: PipelineDataFormat ) -> str:
UpperCAmelCase_ : Dict = nlp
UpperCAmelCase_ : Optional[Any] = reader
@staticmethod
def A__ ( lowerCamelCase_: ArgumentParser ) -> Tuple:
UpperCAmelCase_ : Optional[int] = parser.add_parser("""run""" ,help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" ,choices=get_supported_tasks() ,help="""Task to run""" )
run_parser.add_argument("""--input""" ,type=lowerCamelCase_ ,help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" ,type=lowerCamelCase_ ,help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" ,type=lowerCamelCase_ ,help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" ,type=lowerCamelCase_ ,help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" ,type=lowerCamelCase_ ,help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" ,type=lowerCamelCase_ ,help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" ,)
run_parser.add_argument(
"""--format""" ,type=lowerCamelCase_ ,default="""infer""" ,choices=PipelineDataFormat.SUPPORTED_FORMATS ,help="""Input format to read from""" ,)
run_parser.add_argument(
"""--device""" ,type=lowerCamelCase_ ,default=-1 ,help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" ,)
run_parser.add_argument("""--overwrite""" ,action="""store_true""" ,help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self._nlp, []
for entry in self._reader:
UpperCAmelCase_ : Dict = nlp(**lowerCamelCase_ ) if self._reader.is_multi_columns else nlp(lowerCamelCase_ )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
outputs.append(lowerCamelCase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
UpperCAmelCase_ : Optional[int] = self._reader.save_binary(lowerCamelCase_ )
logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' )
else:
self._reader.save(lowerCamelCase_ )
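# Hedged sketch of how the command plugs into the root CLI (assumptions: the
# class above is `RunCommand`, its obfuscated static method is the upstream
# `register_subcommand`, and the second method is the upstream `run`):
def _example_cli(argv=None):
    parser = ArgumentParser("transformers-cli (sketch)")
    RunCommand.register_subcommand(parser.add_subparsers(help="commands"))
    args = parser.parse_args(argv)  # e.g. ["run", "--task", "ner", "--input", "in.txt"]
    command = args.func(args)       # the factory above builds a RunCommand
    return command.run()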
| 59
|
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase_ = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
UpperCamelCase_ = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
UpperCamelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
'''simple docstring'''
def A__ ( self: Any ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] ,)
def A__ ( self: List[str] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any]=None ) -> int:
return {
"matthews_correlation": float(matthews_corrcoef(lowerCamelCase_ ,lowerCamelCase_ ,sample_weight=lowerCamelCase_ ) ),
}
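# Hedged illustration of the binary-case formula the description above refers to,
# MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)); the metric itself
# delegates to sklearn, which also covers the multiclass generalisation.
import math

def _binary_mcc(tp: int, tn: int, fp: int, fn: int) -> float:
    denom = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0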
| 59
| 1
|
'''simple docstring'''
def __snake_case( _lowerCAmelCase ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
        raise ValueError("""The given input must not be negative""" )
# get the generated string sequence
snake_case__ : Tuple = gray_code_sequence_string(_lowerCAmelCase )
    # convert them to integers
for i in range(len(_lowerCAmelCase ) ):
snake_case__ : Union[str, Any] = int(sequence[i] , 2 )
return sequence
def __snake_case( _lowerCAmelCase ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
snake_case__ : Union[str, Any] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
snake_case__ : List[Any] = gray_code_sequence_string(bit_count - 1 )
snake_case__ : int = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
snake_case__ : Union[str, Any] = """0""" + smaller_sequence[i]
sequence.append(_lowerCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
snake_case__ : Tuple = """1""" + smaller_sequence[i]
sequence.append(_lowerCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
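# Hedged cross-check: the n-bit reflected Gray code can equivalently be generated
# iteratively as i ^ (i >> 1), which should match the recursive construction above
# after its string-to-int conversion.
def _gray_code_iterative(bit_count: int) -> list:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]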
| 35
|
import math
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float:
if (
not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float:
if (
not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
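# Hedged worked example: for apparent power S = 100 VA at power factor 0.8, real
# power is S * pf = 80 W and reactive power is S * sqrt(1 - pf^2) = 60 VAR. (Both
# functions above are mangled to the same name, so the formulas are applied directly.)
def _example_power_split(apparent_power: float = 100.0, power_factor: float = 0.8):
    real = apparent_power * power_factor                         # 80.0 W
    reactive = apparent_power * math.sqrt(1 - power_factor**2)   # 60.0 VAR
    return real, reactive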
| 48
| 0
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = ["input_features"]
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=80 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16_000 , __SCREAMING_SNAKE_CASE : Optional[int]=160 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : Dict=400 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Tuple=False , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = n_fft
__SCREAMING_SNAKE_CASE = hop_length
__SCREAMING_SNAKE_CASE = chunk_length
__SCREAMING_SNAKE_CASE = chunk_length * sampling_rate
__SCREAMING_SNAKE_CASE = self.n_samples // hop_length
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm="""slaney""" , mel_scale="""slaney""" , )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
__SCREAMING_SNAKE_CASE = log_spec[:, :-1]
__SCREAMING_SNAKE_CASE = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
__SCREAMING_SNAKE_CASE = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : List[np.ndarray] , __SCREAMING_SNAKE_CASE : List[np.ndarray] , __SCREAMING_SNAKE_CASE : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
__SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE , np.intaa )
__SCREAMING_SNAKE_CASE = []
for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
__SCREAMING_SNAKE_CASE = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
__SCREAMING_SNAKE_CASE = padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "max_length" , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T]
__SCREAMING_SNAKE_CASE = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
__SCREAMING_SNAKE_CASE = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__SCREAMING_SNAKE_CASE = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
__SCREAMING_SNAKE_CASE = np.stack(padded_inputs["""input_features"""] , axis=0 )
# make sure list is in array format
__SCREAMING_SNAKE_CASE = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
__SCREAMING_SNAKE_CASE = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
__SCREAMING_SNAKE_CASE = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__SCREAMING_SNAKE_CASE = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
__SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
def UpperCAmelCase__ ( self : List[Any] ) -> Dict[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
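# Hedged usage sketch (assumption: the class above mirrors
# transformers.WhisperFeatureExtractor, whose defaults -- 80 mel bins, 16 kHz,
# hop 160, 30 s chunks -- pad every input to 3000 log-mel frames):
def _example_log_mel(extractor):
    audio = np.zeros(16_000, dtype=np.float32)           # 1 s of silence
    feats = extractor(audio, sampling_rate=16_000, return_tensors="np")
    return feats["input_features"].shape                 # expected (1, 80, 3000)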
| 331
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
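# Hedged standalone walk-through: a DisjunctiveConstraint is complete as soon as
# any one of its branches has been generated token by token.
def _example_disjunctive():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    return completed  # True: the [1, 2, 4] branch finished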
| 331
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case : List[Any] = random.Random()
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : List[str]=1.0, lowerCAmelCase_ : Dict=None, lowerCAmelCase_ : Optional[Any]=None ):
if rng is None:
__lowerCAmelCase = global_rng
__lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : int=4_0_0 , lowerCAmelCase_ : Any=2_0_0_0 , lowerCAmelCase_ : Optional[int]=2_0_4_8 , lowerCAmelCase_ : List[str]=1_2_8 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Optional[Any]=5_1_2 , lowerCAmelCase_ : str=3_0 , lowerCAmelCase_ : List[Any]=4_4_1_0_0 , ) -> Optional[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = min_seq_length
__lowerCAmelCase = max_seq_length
__lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCAmelCase = spectrogram_length
__lowerCAmelCase = feature_size
__lowerCAmelCase = num_audio_channels
__lowerCAmelCase = hop_length
__lowerCAmelCase = chunk_length
__lowerCAmelCase = sampling_rate
def lowercase ( self : List[Any] ) -> Any:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Dict=False ) -> Tuple:
def _flatten(lowerCAmelCase_ : Union[str, Any] ):
return list(itertools.chain(*__lowerCamelCase ) )
if equal_length:
__lowerCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCAmelCase = [np.asarray(__lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
a_ = TvltFeatureExtractor
def lowercase ( self : Dict ) -> int:
__lowerCAmelCase = TvltFeatureExtractionTester(self )
def lowercase ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__lowerCamelCase , 'spectrogram_length' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'feature_size' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'num_audio_channels' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'hop_length' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'chunk_length' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'sampling_rate' ) )
def lowercase ( self : Optional[int] ) -> Dict:
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = feat_extract_first.save_pretrained(__lowerCamelCase )[0]
check_json_file_has_correct_format(__lowerCamelCase )
__lowerCAmelCase = self.feature_extraction_class.from_pretrained(__lowerCamelCase )
__lowerCAmelCase = feat_extract_first.to_dict()
__lowerCAmelCase = feat_extract_second.to_dict()
__lowerCAmelCase = dict_first.pop('mel_filters' )
__lowerCAmelCase = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = os.path.join(__lowerCamelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(__lowerCamelCase )
__lowerCAmelCase = self.feature_extraction_class.from_json_file(__lowerCamelCase )
__lowerCAmelCase = feat_extract_first.to_dict()
__lowerCAmelCase = feat_extract_second.to_dict()
__lowerCAmelCase = dict_first.pop('mel_filters' )
__lowerCAmelCase = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase ( self : int ) -> int:
# Initialize feature_extractor
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowerCAmelCase = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
__lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__lowerCAmelCase = feature_extractor(__lowerCamelCase , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__lowerCAmelCase = feature_extractor(
__lowerCamelCase , return_tensors='np' , sampling_rate=4_4_1_0_0 , mask_audio=__lowerCamelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__lowerCAmelCase = np.asarray(__lowerCamelCase )
__lowerCAmelCase = feature_extractor(__lowerCamelCase , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowercase ( self : int , lowerCAmelCase_ : List[str] ) -> Tuple:
__lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__lowerCAmelCase = ds.sort('id' ).select(range(__lowerCamelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowercase ( self : List[Any] ) -> Optional[int]:
__lowerCAmelCase = self._load_datasamples(1 )
__lowerCAmelCase = TvltFeatureExtractor()
__lowerCAmelCase = feature_extractor(__lowerCamelCase , return_tensors='pt' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
__lowerCAmelCase = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __lowerCamelCase , atol=1e-4 ) )
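# Hedged mini-example of the ramping-length input pattern the tester above
# prepares: three mono clips of increasing length, batched as numpy arrays.
def _example_ramped_inputs():
    return [np.zeros(n, dtype=np.float32) for n in (8_0_0, 1_0_0_0, 1_2_0_0)]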
| 284
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a__: Dict = logging.get_logger(__name__)
a__: str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__: Any = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
a__: Any = {
'distilbert-base-uncased': 512,
'distilbert-base-uncased-distilled-squad': 512,
'distilbert-base-cased': 512,
'distilbert-base-cased-distilled-squad': 512,
'distilbert-base-german-cased': 512,
'distilbert-base-multilingual-cased': 512,
}
a__: Optional[Any] = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
__SCREAMING_SNAKE_CASE = DistilBertTokenizer
def __init__( self,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=True,__lowerCamelCase="[UNK]",__lowerCamelCase="[SEP]",__lowerCamelCase="[PAD]",__lowerCamelCase="[CLS]",__lowerCamelCase="[MASK]",__lowerCamelCase=True,__lowerCamelCase=None,**__lowerCamelCase,):
super().__init__(
__lowerCamelCase,tokenizer_file=__lowerCamelCase,do_lower_case=__lowerCamelCase,unk_token=__lowerCamelCase,sep_token=__lowerCamelCase,pad_token=__lowerCamelCase,cls_token=__lowerCamelCase,mask_token=__lowerCamelCase,tokenize_chinese_chars=__lowerCamelCase,strip_accents=__lowerCamelCase,**__lowerCamelCase,)
A__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''',__lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''',__lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''',__lowerCamelCase ) != tokenize_chinese_chars
):
A__ = getattr(__lowerCamelCase,normalizer_state.pop('''type''' ) )
A__ = do_lower_case
A__ = strip_accents
A__ = tokenize_chinese_chars
A__ = normalizer_class(**__lowerCamelCase )
A__ = do_lower_case
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=None ):
A__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
return output
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = self._tokenizer.model.save(__lowerCamelCase,name=__lowerCamelCase )
return tuple(__lowerCamelCase )
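# Hedged illustration of the special-token layout the two methods above build
# (BERT-style; the IDs here are placeholders, not real vocabulary entries):
#   single sequence: [CLS] A [SEP]          -> token_type_ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over the first segment, 1s after
def _example_layout(cls_id=101, sep_id=102, ids_a=(7, 8), ids_b=(9,)):
    input_ids = [cls_id, *ids_a, sep_id, *ids_b, sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids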
| 193
| 0
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCamelCase : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : str = field(
default=__a , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__a )} )
_A : str = field(
default=__a , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
_A : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_A : int = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
_A : int = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
_A : int = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
_A : bool = field(
default=__a , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_A : bool = field(
default=__a , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
_A : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_A : int = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_A : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
_A : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[int] = '''train'''
_A : List[str] = '''dev'''
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : SquadDataTrainingArguments
_A : List[SquadFeatures]
_A : Split
_A : bool
def __init__( self : int , __a : SquadDataTrainingArguments , __a : PreTrainedTokenizer , __a : Optional[int] = None , __a : Union[str, Split] = Split.train , __a : Optional[bool] = False , __a : Optional[str] = None , __a : Optional[str] = "pt" , ) -> List[Any]:
"""simple docstring"""
__lowercase : List[Any] = args
__lowercase : List[str] = is_language_sensitive
        __lowercase : List[Any] = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__a , __a ):
try:
__lowercase : Any = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowercase : int = mode
# Load data features from cache or dataset file
__lowercase : Optional[int] = """v2""" if args.version_2_with_negative else """v1"""
__lowercase : List[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase : Any = cached_features_file + """.lock"""
with FileLock(__a ):
if os.path.exists(__a ) and not args.overwrite_cache:
__lowercase : Tuple = time.time()
__lowercase : Any = torch.load(__a )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowercase : Optional[int] = self.old_features["""features"""]
__lowercase : List[Any] = self.old_features.get("""dataset""" , __a )
__lowercase : Dict = self.old_features.get("""examples""" , __a )
logger.info(
F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
""" future run""" )
else:
if mode == Split.dev:
__lowercase : str = self.processor.get_dev_examples(args.data_dir )
else:
__lowercase : Tuple = self.processor.get_train_examples(args.data_dir )
__lowercase , __lowercase : Union[str, Any] = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__a , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__a , )
__lowercase : List[str] = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , __a , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : str , __a : List[Any] ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
__lowercase : str = self.features[i]
__lowercase : List[str] = torch.tensor(feature.input_ids , dtype=torch.long )
__lowercase : Tuple = torch.tensor(feature.attention_mask , dtype=torch.long )
__lowercase : int = torch.tensor(feature.token_type_ids , dtype=torch.long )
__lowercase : Union[str, Any] = torch.tensor(feature.cls_index , dtype=torch.long )
__lowercase : Optional[int] = torch.tensor(feature.p_mask , dtype=torch.float )
__lowercase : str = torch.tensor(feature.is_impossible , dtype=torch.float )
__lowercase : List[str] = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowercase : List[str] = torch.tensor(feature.start_position , dtype=torch.long )
__lowercase : int = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
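# Hedged sketch of the per-example dict __getitem__ returns for a BERT-style
# model during training (illustrative zero tensors; real values come from the
# cached SquadFeatures):
def _example_item(seq_len: int = 8):
    return {
        "input_ids": torch.zeros(seq_len, dtype=torch.long),
        "attention_mask": torch.ones(seq_len, dtype=torch.long),
        "token_type_ids": torch.zeros(seq_len, dtype=torch.long),
        "start_positions": torch.tensor(0),
        "end_positions": torch.tensor(0),
    }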
| 306
|
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
__lowercase : Any = get_failure_array(lowerCAmelCase_ )
# 2) Step through text searching for pattern
__lowercase , __lowercase : Optional[int] = 0, 0 # index into text, pattern
while i < len(lowerCAmelCase_ ):
if pattern[j] == text[i]:
if j == (len(lowerCAmelCase_ ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
__lowercase : Optional[Any] = failure[j - 1]
continue
i += 1
return False
def snake_case_ ( lowerCAmelCase_ : str ):
__lowercase : List[Any] = [0]
__lowercase : Optional[Any] = 0
__lowercase : List[Any] = 1
while j < len(lowerCAmelCase_ ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
__lowercase : List[str] = failure[i - 1]
continue
j += 1
failure.append(lowerCAmelCase_ )
return failure
if __name__ == "__main__":
# Test 1)
lowerCamelCase : Dict = '''abc1abc12'''
lowerCamelCase : Union[str, Any] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
lowerCamelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowerCamelCase : List[Any] = '''ABABX'''
lowerCamelCase : List[Any] = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
lowerCamelCase : int = '''AAAB'''
lowerCamelCase : Optional[int] = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
lowerCamelCase : Optional[Any] = '''abcdabcy'''
lowerCamelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
lowerCamelCase : Dict = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
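# Hedged cross-check (assumption: the first function above is the original `kmp`):
# its boolean result should agree with Python's built-in substring test.
def _check_kmp(kmp_fn):
    for p, t in (("ABABX", "ABABZABABYABABX"), ("AAAB", "ABAAAAAB")):
        assert kmp_fn(p, t) == (p in t)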
| 306
| 1
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =(DDPMScheduler,)
def __UpperCamelCase ( self : List[str] , **a : Dict ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**a )
return config
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a )
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
self.check_over_configs(thresholding=a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a , prediction_type=a , sample_max_value=a , )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : str = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : str = scheduler_class(**a )
SCREAMING_SNAKE_CASE : Any = len(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(a , a , a , generator=a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE : Any = pred_prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler_class(**a )
SCREAMING_SNAKE_CASE : Any = len(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : str = model(a , a )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : Tuple = scheduler.step(a , a , a , generator=a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE : Optional[int] = pred_prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**a )
SCREAMING_SNAKE_CASE : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a )
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(a ):
if i == len(a ) - 1:
SCREAMING_SNAKE_CASE : List[str] = -1
else:
SCREAMING_SNAKE_CASE : Tuple = timesteps[i + 1]
SCREAMING_SNAKE_CASE : List[Any] = scheduler.previous_timestep(a )
SCREAMING_SNAKE_CASE : List[Any] = prev_t.item()
self.assertEqual(a , a )
def __UpperCamelCase ( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : str = scheduler_class(**a )
SCREAMING_SNAKE_CASE : str = [100, 87, 50, 51, 0]
with self.assertRaises(a , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=a )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**a )
SCREAMING_SNAKE_CASE : int = [100, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE : List[Any] = len(a )
with self.assertRaises(a , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=a , timesteps=a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**a )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            a , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=a )
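# Hedged sketch of the denoising loop these tests exercise (assumption: any
# epsilon-predicting network can stand in for the model; zeros are used here):
def _example_ddpm_loop(num_steps: int = 10):
    scheduler = DDPMScheduler(num_train_timesteps=num_steps)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample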
| 76
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_UpperCAmelCase : List[Any] = get_logger(__name__)
_UpperCAmelCase : Tuple = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , A_ , **A_ ) -> jnp.ndarray:
"""simple docstring"""
for processor in self:
UpperCamelCase = inspect.signature(processor.__call__ ).parameters
if len(A_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
UpperCamelCase = processor(A_ , A_ , A_ , **A_ )
else:
UpperCamelCase = processor(A_ , A_ , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
if not isinstance(A_ , A_ ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
UpperCamelCase = temperature
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores / self.temperature
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(A_ , A_ ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCamelCase = top_p
UpperCamelCase = filter_value
UpperCamelCase = min_tokens_to_keep
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , scores.shape[-1] )
UpperCamelCase = jnp.full_like(A_ , self.filter_value )
UpperCamelCase = jax.nn.softmax(A_ , axis=-1 ).cumsum(axis=-1 )
UpperCamelCase = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCamelCase = jnp.roll(A_ , 1 )
score_mask |= score_mask.at[:, 0].set(A_ )
# min tokens to keep
UpperCamelCase = score_mask.at[:, : self.min_tokens_to_keep].set(A_ )
UpperCamelCase = jnp.where(A_ , A_ , A_ )
UpperCamelCase = jax.lax.sort_key_val(A_ , A_ )[-1]
return next_scores
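# Illustrative NumPy sketch (hypothetical helper, eager and 1-D for clarity, not
# part of this module) of the nucleus/top-p rule implemented above with `lax.top_k`:
# keep the smallest set of highest-scoring tokens whose cumulative probability
# reaches `top_p`, and push every other token's score down to `filter_value`.
import numpy as np

def top_p_mask_sketch(scores, top_p, filter_value=-np.inf):
    order = np.argsort(scores)[::-1]        # token ids sorted by score, best first
    probs = np.exp(scores[order] - scores[order].max())
    probs /= probs.sum()
    keep = np.cumsum(probs) < top_p
    keep = np.roll(keep, 1)                 # also keep the token that crosses top_p
    keep[0] = True
    out = np.full_like(scores, filter_value, dtype=float)
    out[order[keep]] = scores[order[keep]]
    return out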
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[str]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCamelCase = max(A_ , A_ )
UpperCamelCase = filter_value
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = scores.shape
UpperCamelCase = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCamelCase = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , A_ )
UpperCamelCase = jnp.broadcast_to((jnp.arange(A_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCamelCase = topk_scores.flatten()
UpperCamelCase = topk_indices.flatten() + shift
UpperCamelCase = next_scores_flat.at[topk_indices_flat].set(A_ )
UpperCamelCase = next_scores_flat.reshape(A_ , A_ )
return next_scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = bos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.bos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = max_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.eos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(A_ , A_ ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCamelCase = min_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
# create boolean flag to decide if min length penalty should be applied
UpperCamelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , A_ )
return scores
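# Tiny eager illustration (assumes plain NumPy arrays, unlike the traceable jnp
# version above): while fewer than `min_length` tokens have been generated, the
# EOS column is forced to -inf so generation cannot terminate early.
def apply_min_length_sketch(scores, cur_len, min_length, eos_token_id):
    if cur_len < min_length:
        scores = scores.copy()
        scores[:, eos_token_id] = -float('inf')
    return scores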
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = list(A_ )
UpperCamelCase = begin_index
def __call__( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = list(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = dict(A_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
UpperCamelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCamelCase = force_token_array.at[index].set(A_ )
UpperCamelCase = jnp.intaa(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
def _force_token(A_ ):
UpperCamelCase = scores.shape[0]
UpperCamelCase = self.force_token_array[generation_idx]
UpperCamelCase = jnp.ones_like(A_ , dtype=scores.dtype ) * -float('inf' )
UpperCamelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
UpperCamelCase = lax.dynamic_update_slice(A_ , A_ , (0, current_token) )
return new_scores
UpperCamelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(A_ ) , lambda: scores , ) , )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = generate_config.eos_token_id
UpperCamelCase = generate_config.no_timestamps_token_id
UpperCamelCase = generate_config.no_timestamps_token_id + 1
UpperCamelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(A_ , 'max_initial_timestamp_index' ):
UpperCamelCase = generate_config.max_initial_timestamp_index
else:
UpperCamelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCamelCase = model_config.vocab_size
def __call__( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCamelCase = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(A_ , A_ ):
UpperCamelCase = jnp.where((cur_len - self.begin_index) >= 1 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A_ , )
UpperCamelCase = jnp.where((cur_len - self.begin_index) < 2 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , A_ , A_ , )
return jnp.where(
A_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
UpperCamelCase = jnp.where(cur_len == self.begin_index , A_ , A_ )
UpperCamelCase = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A_ , )
UpperCamelCase = self.timestamp_begin + self.max_initial_timestamp_index
UpperCamelCase = jnp.where(
A_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , A_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCamelCase = jax.nn.log_softmax(A_ , axis=-1 )
def handle_cumulative_probs(A_ , A_ ):
UpperCamelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCamelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
return scores
| 222
| 0
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class a_ ( __lowerCamelCase ):
'''simple docstring'''
__a: str = DistilBertTokenizer
__a: Union[str, Any] = DistilBertTokenizerFast
__a: Optional[Any] = True
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
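        # i.e. a single sequence is encoded as [CLS] text [SEP], and a pair as
        # [CLS] text [SEP] text_a [SEP], matching BERT's special-token layout.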
| 356
|
MOD_ADLER = 6_5_5_2_1
def lowerCamelCase ( plain_text: str ) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
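# Quick usage check: the well-known Adler-32 value of "Wikipedia" is 0x11E60398,
# i.e. b = 0x11E6 (4582) in the high 16 bits and a = 0x0398 (920) in the low bits.
assert lowerCamelCase('Wikipedia' ) == 0x11E60398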
| 14
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _SCREAMING_SNAKE_CASE (self : int , references : List[List[List[str]]] , predictions : List[List[str]] , min_len : int = 1 , max_len : int = 4 , ) -> Dict[str, float]:
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
| 59
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[str]:
'''simple docstring'''
snake_case : int = tempfile.mkdtemp()
# fmt: off
snake_case : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
snake_case : int = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
snake_case : Optional[Any] = os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : str ) -> Optional[int]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : List[str] ) -> int:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : List[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case : Optional[int] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.get_tokenizer()
snake_case : Optional[Any] = self.get_image_processor()
snake_case : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor.save_pretrained(self.tmpdirname )
snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case : Tuple = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
snake_case : List[str] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case : str = self.get_image_processor()
snake_case : Optional[int] = self.get_tokenizer()
snake_case : List[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = self.prepare_image_inputs()
snake_case : str = image_processor(snake_case__ , return_tensors="np" )
snake_case : Any = processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = self.get_image_processor()
snake_case : int = self.get_tokenizer()
snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Tuple = "lower newer"
snake_case : Tuple = processor(text=snake_case__ )
snake_case : Union[str, Any] = tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = self.get_image_processor()
snake_case : Dict = self.get_tokenizer()
snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : int = "lower newer"
snake_case : Dict = self.prepare_image_inputs()
snake_case : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(snake_case__ ):
processor()
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case : Tuple = self.get_image_processor()
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : Tuple = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : List[Any] = processor.batch_decode(snake_case__ )
snake_case : Union[str, Any] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : str = self.get_image_processor()
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = "lower newer"
snake_case : List[Any] = self.prepare_image_inputs()
snake_case : Tuple = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
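        # In short, the processor under test is a thin wrapper: `processor(text=...,
        # images=...)` merges the tokenizer output (input_ids, token_type_ids,
        # attention_mask) with the image processor output (pixel_values) into a
        # single batch, which is exactly what the key assertions above verify.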
| 59
| 1
|
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowercase (tmpdir ):
    locka = FileLock(str(tmpdir / 'foo.lock' ) )
    lockb = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def lowercase (tmpdir ):
    filename = 'a' * 10_00 + '.lock'
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('.lock' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_55
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
| 38
|
"""simple docstring"""
from math import sqrt
def sum_of_divisors (n : int ) -> int:
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution (limit : int = 1_00_00 ) -> int:
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
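# Illustrative check of the amicable-number condition used in `solution` above:
# 220 and 284 form the smallest amicable pair, so solution(300) == 220 + 284 == 504.
assert sum_of_divisors(220 ) == 284 and sum_of_divisors(284 ) == 220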
| 38
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase :List[Any] = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : str = ["""input_features"""]
def __init__( self : str , _A : List[Any]=80 , _A : int=16000 , _A : List[Any]=160 , _A : Tuple=30 , _A : Tuple=400 , _A : Union[str, Any]=0.0 , _A : List[Any]=False , **_A : Optional[Any] , ) -> Optional[Any]:
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
__magic_name__ : Dict = n_fft
__magic_name__ : Optional[int] = hop_length
__magic_name__ : Tuple = chunk_length
__magic_name__ : int = chunk_length * sampling_rate
__magic_name__ : Dict = self.n_samples // hop_length
__magic_name__ : int = sampling_rate
__magic_name__ : Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=_A , norm='slaney' , mel_scale='slaney' , )
def __lowerCAmelCase ( self : Optional[int] , _A : np.array ) -> np.ndarray:
__magic_name__ : int = spectrogram(
_A , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
__magic_name__ : Optional[Any] = log_spec[:, :-1]
__magic_name__ : List[str] = np.maximum(_A , log_spec.max() - 8.0 )
__magic_name__ : Dict = (log_spec + 4.0) / 4.0
return log_spec
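        # Note: the clipping above is Whisper-style log-mel normalisation: clamp the
        # log10 spectrogram to within 8.0 of its maximum (i.e. an 80 dB dynamic
        # range), then map it to roughly [-1, 1] via (log_spec + 4.0) / 4.0.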
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __lowerCAmelCase ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
__magic_name__ : Tuple = np.array(_A , np.intaa )
__magic_name__ : Optional[int] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
__magic_name__ : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
__magic_name__ : Optional[int] = padding_value
normed_input_values.append(_A )
else:
__magic_name__ : List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : str , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__magic_name__ : Any = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
__magic_name__ : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__magic_name__ : str = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
__magic_name__ : Union[str, Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__magic_name__ : int = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__magic_name__ : Optional[Any] = [np.asarray([raw_speech] ).T]
__magic_name__ : Optional[Any] = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
__magic_name__ : int = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__magic_name__ : str = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
__magic_name__ : List[str] = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
__magic_name__ : Union[str, Any] = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
__magic_name__ : int = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
__magic_name__ : Optional[int] = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
__magic_name__ : List[Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__magic_name__ : Tuple = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
__magic_name__ : Dict = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __lowerCAmelCase ( self : List[Any] ) -> Dict[str, Any]:
__magic_name__ : Tuple = copy.deepcopy(self.__dict__ )
__magic_name__ : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 331
|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = (DDPMScheduler,)
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
__magic_name__ : str = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
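        # For reference: with variance_type="fixed_small", _get_variance(t) is the
        # DDPM posterior variance beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t),
        # which is 0 at t=0 and approaches beta_t for large t (here beta_999 = 0.02).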
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _A , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_A )
| 331
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
__UpperCAmelCase =[8, 5, 9, 7]
__UpperCAmelCase =[
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__UpperCAmelCase =[
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class a__ :
def __init__( self : int , a : list[int] , a : list[list[int]] , a : list[list[int]] , ):
"""simple docstring"""
__lowerCamelCase = claim_vector
__lowerCamelCase = allocated_resources_table
__lowerCamelCase = maximum_claim_table
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(a ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
return {self.__need().index(a ): i for i in self.__need()}
def SCREAMING_SNAKE_CASE__ ( self : int , **a : str ):
"""simple docstring"""
__lowerCamelCase = self.__need()
__lowerCamelCase = self.__allocated_resources_table
__lowerCamelCase = self.__available_resources()
__lowerCamelCase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
__lowerCamelCase = False
for each_need in need_list:
__lowerCamelCase = True
for index, need in enumerate(a ):
if need > available_resources[index]:
__lowerCamelCase = False
break
if execution:
__lowerCamelCase = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__lowerCamelCase = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(a )
# update available/freed resources stack
__lowerCamelCase = np.array(a ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(a ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(a ) + 1}"""
+ ''' '''.join(f"""{it:>8}""" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(a ) + 1}"""
+ ''' '''.join(f"""{it:>8}""" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(a ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(a ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
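# Condensed standalone sketch (hypothetical helper, separate from the class above)
# of the Banker's-algorithm safety test it implements: repeatedly run any process
# whose remaining need fits within the available vector, reclaim its allocation,
# and report whether every process can eventually finish.
def is_safe_sketch(available, need, allocated):
    work = list(available)
    finished = [False] * len(need)
    progress = True
    while progress:
        progress = False
        for i, row in enumerate(need):
            if not finished[i] and all(n <= w for n, w in zip(row, work)):
                work = [w + a for w, a in zip(work, allocated[i])]
                finished[i] = True
                progress = True
    return all(finished)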
| 237
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences ( sequence: list[Any] ) -> None:
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree ( sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
__UpperCAmelCase =[3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
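# For a sequence of length n, the state-space tree above enumerates all 2**n
# subsequences: each index is either skipped (first recursive call) or included
# (second call). e.g. generate_all_subsequences([1, 2]) prints [], [2], [1], [1, 2].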
| 237
| 1
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase = 16
UpperCamelCase = 32
def __lowerCamelCase ( snake_case__ ,snake_case__ = 16 ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_SCREAMING_SNAKE_CASE = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=snake_case__ ,max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_SCREAMING_SNAKE_CASE = datasets.map(
snake_case__ ,batched=snake_case__ ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_SCREAMING_SNAKE_CASE = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
_SCREAMING_SNAKE_CASE = 8
else:
_SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
snake_case__ ,padding="""longest""" ,max_length=snake_case__ ,pad_to_multiple_of=snake_case__ ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["""train"""] ,shuffle=snake_case__ ,collate_fn=snake_case__ ,batch_size=snake_case__ )
_SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=snake_case__ ,collate_fn=snake_case__ ,batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase = mocked_dataloaders # noqa: F811
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,snake_case__ ) == "1":
_SCREAMING_SNAKE_CASE = 2
# Initialize accelerator
_SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_SCREAMING_SNAKE_CASE = config["""lr"""]
_SCREAMING_SNAKE_CASE = int(config["""num_epochs"""] )
_SCREAMING_SNAKE_CASE = int(config["""seed"""] )
_SCREAMING_SNAKE_CASE = int(config["""batch_size"""] )
_SCREAMING_SNAKE_CASE = evaluate.load("""glue""" ,"""mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=snake_case__ )
def inner_training_loop(snake_case__ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_SCREAMING_SNAKE_CASE = model.to(accelerator.device )
# Instantiate optimizer
_SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() ,lr=snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = get_dataloaders(snake_case__ ,snake_case__ )
# Instantiate scheduler
_SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=snake_case__ ,num_warmup_steps=1_00 ,num_training_steps=(len(snake_case__ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_SCREAMING_SNAKE_CASE = model(**snake_case__ )
_SCREAMING_SNAKE_CASE = outputs.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**snake_case__ )
_SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ ,references=snake_case__ ,)
_SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' ,snake_case__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=snake_case__ ,default=snake_case__ ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ ,snake_case__ )
if __name__ == "__main__":
main()
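# Hedged sketch of what `find_executable_batch_size` does conceptually (names and
# error handling here are illustrative; the real decorator in accelerate.utils also
# recognises CUDA OOM messages and frees cached state between attempts): retry the
# decorated inner loop with a halved batch size whenever it runs out of memory.
def find_executable_batch_size_sketch(function, starting_batch_size=128):
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as err:  # assumes OOM surfaces as RuntimeError
                if 'out of memory' not in str(err).lower():
                    raise
                batch_size //= 2
        raise RuntimeError('No executable batch size found; reached batch size 0.')
    return wrapper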
| 306
|
def __lowerCamelCase ( x_points ,y_points ,xa ) -> list:
    """simple docstring"""
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 ,n ):
        for j in range(i ,n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
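# Illustrative usage (the recurrence above matches Neville's iterated interpolation):
# the five points below lie on the line y = x + 5, so evaluating at xa = 5 returns
# 10.0 as the first element of the result list (the second element is the q table).
assert __lowerCamelCase([1, 2, 3, 4, 6] , [6, 7, 8, 9, 11] , 5 )[0] == 10.0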
| 306
| 1
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = 'EncodecFeatureExtractor'
SCREAMING_SNAKE_CASE__ = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
super().__init__(_lowerCamelCase , _lowerCamelCase )
a :List[Any] = self.feature_extractor
a :List[str] = False
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowerCamelCase , language=_lowerCamelCase , no_timestamps=_lowerCamelCase )
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
a :Any = kwargs.pop('''audio''' , _lowerCamelCase )
a :Tuple = kwargs.pop('''sampling_rate''' , _lowerCamelCase )
a :List[str] = kwargs.pop('''text''' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
a :Tuple = args[0]
a :Tuple = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
a :Dict = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if audio is not None:
a :Tuple = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
a :List[Any] = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
a :Tuple = audio_inputs['''padding_mask''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ):
a :List[Any] = kwargs.pop('''audio''' , _lowerCamelCase )
a :Tuple = kwargs.pop('''padding_mask''' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
a :Union[str, Any] = args[0]
a :Dict = args[1:]
if audio_values is not None:
return self._decode_audio(_lowerCamelCase , padding_mask=_lowerCamelCase )
else:
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
a :str = to_numpy(_lowerCamelCase )
a , a , a :Union[str, Any] = audio_values.shape
if padding_mask is None:
return list(_lowerCamelCase )
a :Dict = to_numpy(_lowerCamelCase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
a :str = seq_len - padding_mask.shape[-1]
a :Optional[Any] = 1 - self.feature_extractor.padding_value
a :Tuple = np.pad(_lowerCamelCase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowerCamelCase )
a :Any = audio_values.tolist()
for i in range(_lowerCamelCase ):
a :Optional[int] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
a :Any = sliced_audio.reshape(_lowerCamelCase , -1 )
return audio_values
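# NumPy illustration (shapes and values are hypothetical) of the padding-mask
# slicing in `_decode_audio` above: positions where the mask equals the padding
# value are dropped, trimming each decoded waveform back to its true length.
import numpy as np

audio = np.arange(8.0).reshape(1, 8)       # one channel, 8 samples
mask = np.array([1, 1, 1, 1, 1, 0, 0, 0])  # trailing zeros mark padded samples
trimmed = audio[:, mask != 0]
assert trimmed.shape == (1, 5)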
| 281
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
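
# Added note (hedged summary, not asserted by the test itself): CycleDiffusion edits a
# real image by DDIM-inverting it under `source_prompt` and re-sampling the trajectory
# under `prompt`; `strength` controls how much of the trajectory is re-noised and
# `source_guidance_scale` scales guidance on the inversion side.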
| 281
| 1
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """Build NUMBER_IMAGES mosaics from the dataset and write images plus YOLO labels."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO-format label files and pair each with its image path."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Arrange four images (and their boxes) into one mosaic of `output_size`."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generate a random lowercase-alphanumeric string of `number_char` characters."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
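
# Hedged illustration (added; not part of the original script): the label files parsed in
# `get_dataset` store YOLO-style boxes (class, x_center, y_center, width, height) in
# normalized coordinates, which the code converts to corner form and back. A round-trip
# check with exactly representable values:
def _demo_yolo_roundtrip() -> None:
    x_center, y_center, width, height = 0.5, 0.5, 0.25, 0.5
    xmin, ymin = x_center - width / 2, y_center - height / 2
    xmax, ymax = x_center + width / 2, y_center + height / 2
    assert (xmin, ymin, xmax, ymax) == (0.375, 0.25, 0.625, 0.75)
    assert (xmax - xmin, ymax - ymin) == (width, height)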
| 29
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[int] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
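
# Hedged usage sketch (added; not part of the upstream module): exercising the `field`
# branch of `_generate_tables` above. The temp-file contents and field name are made up
# for the demo.
if __name__ == "__main__":
    import tempfile

    from datasets import load_dataset

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
        tmp.write('{"data": [{"a": 1}, {"a": 2}]}')
    ds = load_dataset("json", data_files=tmp.name, field="data", split="train")
    print(ds[0])  # expected: {'a': 1}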
| 14
| 0
|
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with bead sort ("gravity sort")."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 121
|
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Map HF Diffusers UNet keys onto original stable-diffusion checkpoint keys."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # single character: 'q', 'k' or 'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
lowerCAmelCase__ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ = load_file(unet_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
lowerCAmelCase__ = load_file(vae_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
lowerCAmelCase__ = load_file(text_enc_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
lowerCAmelCase__ = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
lowerCAmelCase__ = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
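
# Hedged usage sketch (added): a typical invocation of this converter. The script file
# name and both paths below are placeholders, not taken from the original file.
#
#   python convert_diffusers_to_sd.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors \
#       --half --use_safetensors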
| 121
| 1
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
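
# Hedged usage sketch (added; the SparkSession setup is illustrative and not part of
# this module): materializing a Spark DataFrame as a Hugging Face dataset.
#
#   from pyspark.sql import SparkSession
#
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([("a",), ("b",)], ["col"])
#   ds = SparkDatasetReader(df, streaming=False).read()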
| 38
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
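
# Added note (hedged): the "@-@", "@,@" and "@.@" tokens asserted above are Moses-style
# escapes that keep hyphenated words and number separators reversible; the round-trip
# `convert_tokens_to_string` check relies on exactly that.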
| 38
| 1
|
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolates the decimal part of `number`; if `digit_amount` > 0, the result is
    rounded to that many decimal places.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 40
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
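
# Hedged usage sketch (added): the token-type ids produced by the pair-sequence methods
# above, assuming "hello" and "world" are single wordpieces in the checkpoint's vocab.
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tok("hello", "world")
#   # enc["token_type_ids"] == [0, 0, 0, 1, 1]   # [CLS] hello [SEP] | world [SEP]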
| 40
| 1
|
'''simple docstring'''
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 237
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
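
# Hedged standalone sketch (added; not part of the original test file): constructing the
# scheduler with the Karras sigma schedule exercised in the last test above.
#
#   from diffusers import DPMSolverSDEScheduler
#
#   sched = DPMSolverSDEScheduler(use_karras_sigmas=True)
#   sched.set_timesteps(10)
#   print(sched.timesteps)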
| 237
| 1
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the list from both ends to find the index of `key` using recursion.
    Returns the index of `key` if found, -1 otherwise.

    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    >>> search([5], 5)
    0
    >>> search([], 1)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
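    # Added illustration: the scan above checks indices (0, n-1), (1, n-2), ... so it
    # recurses at most ceil(n / 2) times.
    print(search([10, 20, 30, 40], key=30))  # 2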
| 369
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks if a number is prime, testing only 6k +/- 1 candidate divisors."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite violating Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
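    # Hedged note (added): the expected value here is 5777, the smallest odd composite
    # that is not a prime plus twice a square; stated for reference, not read from the
    # original file.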
| 157
| 0
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
snake_case : List[str] = ["text", "image", "audio"]
def lowerCAmelCase_ ( _snake_case : List[str] ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : Union[str, Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_snake_case , _snake_case ):
inputs.append(create_inputs(_snake_case ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def lowerCAmelCase_ ( _snake_case : List ) -> str:
'''simple docstring'''
__magic_name__ : Union[str, Any] = []
for output in outputs:
if isinstance(_snake_case , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_snake_case , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_snake_case , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class _snake_case :
def SCREAMING_SNAKE_CASE ( self ):
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
__magic_name__ : Union[str, Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , _a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
__magic_name__ : List[Any] = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = create_inputs(self.tool.inputs )
__magic_name__ : List[str] = self.tool(*_a )
# There is a single output
if len(self.tool.outputs ) == 1:
__magic_name__ : Any = [outputs]
self.assertListEqual(output_types(_a ) , self.tool.outputs )
def SCREAMING_SNAKE_CASE ( self ):
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
    def test_agent_types_outputs ( self ):
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
    def test_agent_types_inputs ( self ):
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
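# Editor's note: a hypothetical usage sketch (not part of the original file). A concrete
# test case mixes this class into unittest.TestCase and provides `self.tool`, e.g.:
# class TranslationToolTester(ToolTesterMixin, unittest.TestCase):
#     def setUp(self):
#         self.tool = load_tool("translation")  # `load_tool` assumed from transformers.tools
#         self.tool.setup()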
| 281
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy ( out , labels ):
    '''simple docstring'''
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
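# Editor's note (added example): accuracy counts correct argmax predictions, e.g.
# accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])) returns 2.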
def load_rocstories_dataset ( dataset_path ):
    '''simple docstring'''
    with open(dataset_path , encoding="utf_8" ) as f:
        reader = csv.reader(f )
        output = []
        next(reader )  # skip the first line
        for line in tqdm(reader ):
            output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets ( encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    '''simple docstring'''
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, conta, contb, mc_label),
        ) in enumerate(dataset ):
            with_conta = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            with_contb = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_conta )] = with_conta
            input_ids[i, 1, : len(with_contb )] = with_contb
            mc_token_ids[i, 0] = len(with_conta ) - 1
            mc_token_ids[i, 1] = len(with_contb ) - 1
            lm_labels[i, 0, : len(with_conta )] = with_conta
            lm_labels[i, 1, : len(with_contb )] = with_contb
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
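# Editor's note (added): for a dataset of N examples the tensors above are shaped
# (N, 2, input_len), (N, 2), (N, 2, input_len) and (N,): one row per story, one column
# per candidate continuation, matching the double-heads model's expected inputs.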
def main ( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , default="openai-gpt" , help="pretrained model name" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument("--train_dataset" , type=str , default="" )
    parser.add_argument("--eval_dataset" , type=str , default="" )
    parser.add_argument("--seed" , type=int , default=42 )
    parser.add_argument("--num_train_epochs" , type=int , default=3 )
    parser.add_argument("--train_batch_size" , type=int , default=8 )
    parser.add_argument("--eval_batch_size" , type=int , default=16 )
    parser.add_argument("--adam_epsilon" , default=1E-8 , type=float , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , type=int , default=1 )
    parser.add_argument(
        "--max_steps" , default=-1 , type=int , help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ) , )
    parser.add_argument(
        "--gradient_accumulation_steps" , type=int , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--learning_rate" , type=float , default=6.25E-5 )
    parser.add_argument("--warmup_steps" , default=0 , type=int , help="Linear warmup over warmup_steps." )
    parser.add_argument("--lr_schedule" , type=str , default="warmup_linear" )
    parser.add_argument("--weight_decay" , type=float , default=0.01 )
    parser.add_argument("--lm_coef" , type=float , default=0.9 )
    parser.add_argument("--n_valid" , type=int , default=374 )
    parser.add_argument("--server_ip" , type=str , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=str , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
    print(args )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info("Encoding dataset..." )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
    input_length = min(input_length , model.config.n_positions )  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps ) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc="Training" )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , "module" ) else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc="Evaluating" ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu" ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir , "eval_results.txt" )
        with open(output_eval_file , "w" ) as writer:
            logger.info("***** Eval results *****" )
            for key in sorted(result.keys() ):
                logger.info("  %s = %s" , key , str(result[key] ) )
                writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 281
| 1
|
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness ( check_program , timeout , task_id , completion_id ):
    """simple docstring"""
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append('''timed out''' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute ( check_program , result , timeout ):
    """simple docstring"""
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append('''passed''' )
        except TimeoutException:
            result.append('''timed out''' )
        except BaseException as e:
            result.append(f'''failed: {e}''' )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit ( seconds ):
    """simple docstring"""
    def signal_handler(signum , frame ):
        raise TimeoutException('''Timed out!''' )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io ( ):
    """simple docstring"""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir ( ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException ( Exception ):
    """simple docstring"""
    pass
class WriteOnlyStringIO ( io.StringIO ):
    """simple docstring"""
    def read ( self , *args , **kwargs ):
        raise OSError
    def readline ( self , *args , **kwargs ):
        raise OSError
    def readlines ( self , *args , **kwargs ):
        raise OSError
    def readable ( self , *args , **kwargs ):
        return False
class redirect_stdin ( contextlib._RedirectStream ):  # type: ignore
    """simple docstring"""
    _stream = "stdin"
@contextlib.contextmanager
def chdir ( root ):
    """simple docstring"""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard ( maximum_memory_bytes=None ):
    """simple docstring"""
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["OMP_NUM_THREADS"] = '''1'''
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
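# Editor's note: a hedged usage sketch (added; not part of the original file). With the
# helpers above, a single completion could be checked roughly as follows; the __main__
# guard matters because check_correctness spawns a subprocess:
# if __name__ == "__main__":
#     outcome = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
#     print(outcome["passed"])  # expected: True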
| 215
|
"""simple docstring"""
from __future__ import annotations
def carrier_concentration ( electron_conc , hole_conc , intrinsic_conc , ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
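# Editor's note: a worked example added for illustration, using the silicon intrinsic
# concentration n_i ~ 1.5e10 cm^-3. From the mass-action law n * p = n_i**2:
# >>> carrier_concentration(electron_conc=1e18, hole_conc=0, intrinsic_conc=1.5e10)
# ('hole_conc', 225.0)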
| 215
| 1
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (self.image_size // self.patch_size) ** 2
        self.seq_length = num_patches + 2
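        # Editor's note (added): with the defaults above this gives (30 // 2) ** 2 + 2 = 227
        # positions: 225 patch tokens plus the [CLS] and distillation tokens.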
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self ):
        """simple docstring"""
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model ( self , config , pixel_values , labels ):
        """simple docstring"""
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling ( self , config , pixel_values , labels ):
        """simple docstring"""
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification ( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
    def test_inputs_embeds ( self ):
"""simple docstring"""
pass
    def test_model_common_attributes ( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature ( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training ( self ):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing ( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types ( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
            {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
            {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
                    config.problem_type = problem_type['''title''']
                    config.num_labels = problem_type['''num_labels''']
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs['''labels'''] = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
                    inputs['''labels'''] = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()
@slow
    def test_model_from_pretrained ( self ):
        """simple docstring"""
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''
@cached_property
    def default_image_processor ( self ):
        """simple docstring"""
        return (
            DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head ( self ):
        """simple docstring"""
        model = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16 ( self ):
        """simple docstring"""
        model = DeiTModel.from_pretrained(
            '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.float16 , device_map='''auto''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 121
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast ( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 121
| 1
|
from math import sqrt
def is_prime ( number ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution ( nth = 1_0_0_0_1 ) -> int:
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(f"{solution() = }")
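# Editor's note (added example): solution(6) returns 13, the sixth prime; the default
# argument yields the 10001st prime, 104743 (Project Euler 7).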
| 252
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester :
    '''simple docstring'''
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs ( self , config , inputs_dict ):
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFMBartForConditionalGeneration,
            """feature-extraction""": TFMBartModel,
            """summarization""": TFMBartForConditionalGeneration,
            """text2text-generation""": TFMBartForConditionalGeneration,
            """translation""": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip (
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp ( self ):
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config ( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
    ]
    expected_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
    ]
    model_name = """facebook/mbart-large-en-ro"""
@cached_property
    def tokenizer ( self ):
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model ( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected ( self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text ( self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
@slow
    def test_batch_generation_en_ro ( self ):
        self._assert_generated_batch_equal_expected()
| 252
| 1
|
"""simple docstring"""
import argparse
import copy
def generate_neighbours ( path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution ( path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood ( solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search ( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main ( args=None ):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
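# Editor's note: a hypothetical input file (added; inferred from the parser above, which
# reads whitespace-separated "node node distance" triples, one edge per line):
#     a b 20
#     a c 18
#     b c 10
# Invocation sketch: python tabu_search.py -f tabudata.txt -i 4 -s 3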
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size ( self ):
        return 32
    @property
    def time_input_dim ( self ):
        return 32
    @property
    def block_out_channels_a ( self ):
        return self.time_input_dim
    @property
    def time_embed_dim ( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim ( self ):
        return 100
@property
    def dummy_unet ( self ):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq ( self ):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components ( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs ( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed )).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 )).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed )).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet ( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
    def tearDown ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet ( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint )).float() / 255.0
        hint = hint.permute(2 , 0 , 1).unsqueeze(0)
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image)
| 40
| 1
|
import sys
from collections import defaultdict
class Heap :
"""simple docstring"""
    def __init__( self ) -> None:
        '''simple docstring'''
        self.node_position = []
    def get_position ( self , vertex ) -> int:
        '''simple docstring'''
        return self.node_position[vertex]
    def set_position ( self , vertex , pos ) -> None:
        '''simple docstring'''
        self.node_position[vertex] = pos
    def top_to_bottom ( self , heap , start , size , positions ) -> None:
        '''simple docstring'''
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top ( self , val , index , heap , position ) -> None:
        '''simple docstring'''
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify ( self , heap , positions ) -> None:
        '''simple docstring'''
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum ( self , heap , positions ) -> int:
        '''simple docstring'''
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prims_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree, starting from vertex 0."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring tree vertex of selected vertex
    # Minimum distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of distances of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prim's Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prims_algorithm(adjacency_list))
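A quick sanity check of `prims_algorithm` (not part of the original file): for the triangle graph 0-1 (weight 1), 1-2 (weight 2), 0-2 (weight 4), the minimum spanning tree keeps the two cheapest edges.

    g = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4)]:
        g[u].append([v, w])
        g[v].append([u, w])
    print(prims_algorithm(g))  # [(0, 1), (1, 2)]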
| 348
|
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
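The two cache checks above mirror the incremental-decoding pattern used with Flax causal LMs. A minimal sketch, assuming flax is installed; the tiny config sizes and variable names are illustrative, not taken from the tests:

    import jax.numpy as jnp
    from transformers import GPTJConfig
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM

    config = GPTJConfig(vocab_size=99, n_positions=64, n_embd=32, n_layer=2, n_head=4, rotary_dim=4)
    model = FlaxGPTJForCausalLM(config)  # randomly initialized weights

    prompt_ids = jnp.array([[1, 2, 3, 4]], dtype="i4")
    batch, prompt_len, max_len = 1, 4, 8

    # Pre-allocate a key/value cache covering max_len positions and run the prompt once.
    past = model.init_cache(batch, max_len)
    mask = jnp.ones((batch, max_len), dtype="i4")
    position_ids = jnp.arange(prompt_len)[None, :]
    out = model(prompt_ids, attention_mask=mask, past_key_values=past, position_ids=position_ids)

    # Then feed one token at a time, reusing the cache returned by the previous call.
    next_token = out.logits[:, -1].argmax(-1)[:, None]
    position_ids = jnp.full((batch, 1), prompt_len, dtype="i4")
    out = model(next_token, attention_mask=mask, past_key_values=out.past_key_values, position_ids=position_ids)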
| 348
| 1
|